repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
adanet | adanet-master/adanet/experimental/keras/ensemble_model_test.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adanet.experimental.keras.EnsembleModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet.experimental.keras import testing_utils
from adanet.experimental.keras.ensemble_model import MeanEnsemble
from adanet.experimental.keras.ensemble_model import WeightedEnsemble
import tensorflow.compat.v2 as tf
class EnsembleModelTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
{
'testcase_name': 'mean_ensemble',
'ensemble': MeanEnsemble,
'want_results': [0.07671691, 0.20448962],
}, {
'testcase_name': 'weighted_ensemble',
'ensemble': WeightedEnsemble,
'output_units': 2,
'want_results': [0.42579408, 0.53439462],
})
def test_lifecycle(self, ensemble, want_results, output_units=None):
train_dataset, test_dataset = testing_utils.get_holdout_data(
train_samples=128,
test_samples=64,
input_shape=(10,),
num_classes=2,
random_seed=42)
    # TODO: Consider performing `tf.data.Dataset` transformations
    # within the get_holdout_data function.
train_dataset = train_dataset.batch(32).repeat(10)
test_dataset = test_dataset.batch(32).repeat(10)
model1 = tf.keras.Sequential([
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(2),
])
model1.compile(
optimizer=tf.keras.optimizers.Adam(0.01),
loss='mse')
model1.fit(train_dataset)
    model1.trainable = False  # Models inside the ensemble should not be trained further.
model1_pre_train_weights = model1.get_weights()
model2 = tf.keras.Sequential([
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(2),
])
model2.compile(
optimizer=tf.keras.optimizers.Adam(0.01),
loss='mse')
model2.fit(train_dataset)
    model2.trainable = False  # Models inside the ensemble should not be trained further.
model2_pre_train_weights = model2.get_weights()
if output_units:
ensemble = ensemble(submodels=[model1, model2],
output_units=output_units)
else:
ensemble = ensemble(submodels=[model1, model2])
ensemble.compile(
optimizer=tf.keras.optimizers.Adam(0.01),
loss='mse',
metrics=['mae'])
ensemble.fit(train_dataset)
# Make sure submodel weights were not altered during ensemble training.
model1_post_train_weights = model1.get_weights()
model2_post_train_weights = model2.get_weights()
self.assertAllClose(model1_pre_train_weights, model1_post_train_weights)
self.assertAllClose(model2_pre_train_weights, model2_post_train_weights)
eval_results = ensemble.evaluate(test_dataset)
self.assertAllClose(eval_results, want_results)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 3,693 | 34.519231 | 79 | py |
adanet | adanet-master/adanet/experimental/keras/model_search_test.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adanet.experimental.keras.ModelSearch."""
import os
import shutil
import sys
import time
from absl import flags
from absl.testing import parameterized
from adanet.experimental.controllers.sequential_controller import SequentialController
from adanet.experimental.keras import testing_utils
from adanet.experimental.keras.ensemble_model import MeanEnsemble
from adanet.experimental.keras.model_search import ModelSearch
from adanet.experimental.phases.autoensemble_phase import AutoEnsemblePhase
from adanet.experimental.phases.autoensemble_phase import GrowStrategy
from adanet.experimental.phases.autoensemble_phase import MeanEnsembler
from adanet.experimental.phases.input_phase import InputPhase
from adanet.experimental.phases.keras_trainer_phase import KerasTrainerPhase
from adanet.experimental.phases.keras_tuner_phase import KerasTunerPhase
from adanet.experimental.phases.repeat_phase import RepeatPhase
from adanet.experimental.storages.in_memory_storage import InMemoryStorage
from kerastuner import tuners
import tensorflow.compat.v2 as tf
class ModelSearchTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(ModelSearchTest, self).setUp()
# Setup and cleanup test directory.
# Flags are not automatically parsed at this point.
flags.FLAGS(sys.argv)
self.test_subdirectory = os.path.join(flags.FLAGS.test_tmpdir, self.id())
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
os.makedirs(self.test_subdirectory)
def tearDown(self):
super(ModelSearchTest, self).tearDown()
shutil.rmtree(self.test_subdirectory, ignore_errors=True)
def test_phases_end_to_end(self):
train_dataset, test_dataset = testing_utils.get_holdout_data(
train_samples=128,
test_samples=64,
input_shape=(10,),
num_classes=10,
random_seed=42)
    # TODO: Consider performing `tf.data.Dataset` transformations
    # within the get_holdout_data function.
train_dataset = train_dataset.batch(32)
test_dataset = test_dataset.batch(32)
model1 = tf.keras.Sequential([
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10),
])
model1.compile(
optimizer=tf.keras.optimizers.Adam(0.01), loss='mse', metrics=['mae'])
model2 = tf.keras.Sequential([
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10),
])
model2.compile(
optimizer=tf.keras.optimizers.Adam(0.01), loss='mse', metrics=['mae'])
# TODO: This test could potentially have the best model be
# a non-ensemble Keras model. Therefore, need to address this issue and
# remove the freeze_submodels flag.
ensemble = MeanEnsemble(submodels=[model1, model2], freeze_submodels=False)
ensemble.compile(
optimizer=tf.keras.optimizers.Adam(0.01), loss='mse', metrics=['mae'])
controller = SequentialController(phases=[
InputPhase(train_dataset, test_dataset),
KerasTrainerPhase([model1, model2]),
KerasTrainerPhase([ensemble]),
])
model_search = ModelSearch(controller)
model_search.run()
self.assertIsInstance(
model_search.get_best_models(num_models=1)[0], MeanEnsemble)
def test_tuner_end_to_end(self):
train_dataset, test_dataset = testing_utils.get_holdout_data(
train_samples=128,
test_samples=64,
input_shape=(10,),
num_classes=10,
random_seed=42)
# TODO: Consider performing `tf.data.Dataset` transformations
# within get_holdout_data function.
train_dataset = train_dataset.batch(32)
test_dataset = test_dataset.batch(32)
def build_model(hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.Dense(
units=hp.Int('units', min_value=32, max_value=512, step=32),
activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(
optimizer=tf.keras.optimizers.Adam(
hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
# Define phases.
tuner = tuners.RandomSearch(
build_model,
objective='val_accuracy',
max_trials=3,
executions_per_trial=1,
directory=self.test_subdirectory,
project_name='helloworld_tuner',
overwrite=True)
tuner_phase = KerasTunerPhase(tuner)
def build_ensemble():
ensemble = MeanEnsemble(
submodels=tuner_phase.get_best_models(num_models=2))
ensemble.compile(
optimizer=tf.keras.optimizers.Adam(0.01), loss='mse', metrics=['mae'])
return [ensemble]
ensemble_phase = KerasTrainerPhase(build_ensemble)
input_phase = InputPhase(train_dataset, test_dataset)
controller = SequentialController(phases=[input_phase,
tuner_phase,
ensemble_phase])
# Execute phases.
model_search = ModelSearch(controller)
model_search.run()
self.assertIsInstance(
model_search.get_best_models(num_models=1)[0], MeanEnsemble)
def test_autoensemble_end_to_end(self):
train_dataset, test_dataset = testing_utils.get_holdout_data(
train_samples=128,
test_samples=64,
input_shape=(10,),
num_classes=10,
random_seed=42)
# TODO: Consider performing `tf.data.Dataset` transformations
# within get_holdout_data function.
train_dataset = train_dataset.batch(32)
test_dataset = test_dataset.batch(32)
def build_model(hp):
model = tf.keras.Sequential()
model.add(
tf.keras.layers.Dense(
units=hp.Int('units', min_value=32, max_value=512, step=32),
activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(
optimizer=tf.keras.optimizers.Adam(
hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
# This allows us to have a shared storage for all the autoensemble phases
# that occur in the repeat phase.
autoensemble_storage = InMemoryStorage()
input_phase = InputPhase(train_dataset, test_dataset)
# pylint: disable=g-long-lambda
repeat_phase = RepeatPhase(
[
lambda: KerasTunerPhase(
tuners.RandomSearch(
build_model,
objective='val_accuracy',
max_trials=3,
executions_per_trial=1,
directory=self.test_subdirectory,
project_name='helloworld_' + str(int(time.time())),
overwrite=True)),
lambda: AutoEnsemblePhase(
ensemblers=[
MeanEnsembler('sparse_categorical_crossentropy', 'adam',
['accuracy'])
],
ensemble_strategies=[GrowStrategy()],
storage=autoensemble_storage)
], repetitions=3)
# pylint: enable=g-long-lambda
controller = SequentialController(phases=[input_phase, repeat_phase])
model_search = ModelSearch(controller)
model_search.run()
self.assertIsInstance(
model_search.get_best_models(num_models=1)[0], MeanEnsemble)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
| 8,219 | 35.533333 | 86 | py |
adanet | adanet-master/adanet/experimental/keras/model_search.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An AdaNet interface for model search."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Sequence
from adanet.experimental.controllers.controller import Controller
from adanet.experimental.schedulers.in_process_scheduler import InProcessScheduler
from adanet.experimental.schedulers.scheduler import Scheduler
import tensorflow.compat.v2 as tf
class ModelSearch(object):
"""An AutoML pipeline manager."""
def __init__(self,
controller: Controller,
scheduler: Scheduler = InProcessScheduler()):
"""Initializes a ModelSearch.
Args:
controller: A `Controller` instance.
scheduler: A `Scheduler` instance.
"""
self._controller = controller
self._scheduler = scheduler
def run(self):
"""Executes the training workflow to generate models."""
self._scheduler.schedule(self._controller.work_units())
def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:
"""Returns the top models from the run."""
return self._controller.get_best_models(num_models)
| 1,757 | 32.807692 | 82 | py |
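A minimal usage sketch of the ModelSearch API above, assuming a controller and its phases have already been built (see the SequentialController and phase classes elsewhere in this dump); the phases argument here is a placeholder:

# Sketch only: wire a controller into ModelSearch and fetch the single best model.
from adanet.experimental.controllers.sequential_controller import SequentialController
from adanet.experimental.keras.model_search import ModelSearch

def run_search(phases):
  controller = SequentialController(phases=phases)
  search = ModelSearch(controller)  # uses the default InProcessScheduler
  search.run()                      # schedules controller.work_units()
  return search.get_best_models(num_models=1)[0]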
adanet | adanet-master/adanet/experimental/keras/__init__.py | # Lint as: python3
# Copyright 2020 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdaNet Keras models."""
from adanet.experimental.keras.ensemble_model import EnsembleModel
from adanet.experimental.keras.ensemble_model import MeanEnsemble
from adanet.experimental.keras.ensemble_model import WeightedEnsemble
from adanet.experimental.keras.model_search import ModelSearch
__all__ = [
"EnsembleModel",
"MeanEnsemble",
"WeightedEnsemble",
"ModelSearch",
]
| 1,015 | 34.034483 | 74 | py |
adanet | adanet-master/adanet/experimental/controllers/sequential_controller.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A manual controller for model search."""
from typing import Iterator, Sequence
from adanet.experimental.controllers.controller import Controller
from adanet.experimental.phases.phase import ModelProvider
from adanet.experimental.phases.phase import Phase
from adanet.experimental.work_units.work_unit import WorkUnit
import tensorflow.compat.v2 as tf
class SequentialController(Controller):
"""A controller where the user specifies the sequences of phase to execute."""
# TODO: Add checks to make sure phases are valid.
def __init__(self, phases: Sequence[Phase]):
"""Initializes a SequentialController.
Args:
phases: A list of `Phase` instances.
"""
self._phases = phases
def work_units(self) -> Iterator[WorkUnit]:
previous_phase = None
for phase in self._phases:
for work_unit in phase.work_units(previous_phase):
yield work_unit
previous_phase = phase
def get_best_models(self, num_models: int) -> Sequence[tf.keras.Model]:
final_phase = self._phases[-1]
if isinstance(final_phase, ModelProvider):
return self._phases[-1].get_best_models(num_models)
raise RuntimeError('Final phase does not provide models.')
| 1,826 | 35.54 | 80 | py |
adanet | adanet-master/adanet/experimental/controllers/controller.py | # Lint as: python3
# Copyright 2019 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The AutoML controller for AdaNet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from typing import Iterator, Sequence
from adanet.experimental.work_units.work_unit import WorkUnit
import tensorflow.compat.v2 as tf
class Controller(abc.ABC):
"""Defines the machine learning workflow to produce high-quality models."""
@abc.abstractmethod
def work_units(self) -> Iterator[WorkUnit]:
"""Yields `WorkUnit` instances."""
pass
@abc.abstractmethod
def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:
"""Returns the top models produced from executing the controller."""
pass
| 1,316 | 31.925 | 77 | py |
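Because Controller is abstract, any concrete controller must implement both work_units() and get_best_models(). A hypothetical minimal subclass (not part of the repository) could look like the following, relying on the phase.work_units(previous_phase) contract used by SequentialController above:

from typing import Iterator, Sequence
import tensorflow.compat.v2 as tf
from adanet.experimental.controllers.controller import Controller
from adanet.experimental.work_units.work_unit import WorkUnit

class SinglePhaseController(Controller):
  """Hypothetical controller that delegates to a single phase."""

  def __init__(self, phase):
    # The phase is assumed to also be a ModelProvider so it can return models.
    self._phase = phase

  def work_units(self) -> Iterator[WorkUnit]:
    # There is no previous phase, so pass None.
    yield from self._phase.work_units(None)

  def get_best_models(self, num_models) -> Sequence[tf.keras.Model]:
    return self._phase.get_best_models(num_models)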
adanet | adanet-master/adanet/tf_compat/__init__.py | # Copyright 2018 The AdaNet Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow major version compatibility code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from distutils.version import LooseVersion
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf_v2
# pylint: disable=unused-import
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python import tf2
from tensorflow.python.keras.metrics import Metric
from tensorflow.python.tpu import tpu_function
from tensorflow_estimator.python.estimator.head import regression_head
# pylint: enable=g-direct-tensorflow-import
# pylint: enable=unused-import
DatasetV1 = tf.compat.v1.data.Dataset
DatasetV2 = tf.compat.v2.data.Dataset
v1 = tf.compat.v1
v2 = tf.compat.v2
try:
SessionRunHook = tf.estimator.SessionRunHook
except AttributeError:
SessionRunHook = tf.train.SessionRunHook
try:
SessionRunArgs = tf.estimator.SessionRunArgs
except AttributeError:
SessionRunArgs = tf.train.SessionRunArgs
try:
SummarySaverHook = tf.estimator.SummarySaverHook
except AttributeError:
SummarySaverHook = tf.train.SummarySaverHook
try:
CheckpointSaverHook = tf.estimator.CheckpointSaverHook
except AttributeError:
CheckpointSaverHook = tf.train.CheckpointSaverHook
try:
# Loss reduction strings change between TF 1.13 and TF 1.14, which causes
# Heads to raise errors.
regression_head.RegressionHead(
loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
SUM_OVER_BATCH_SIZE = tf.losses.Reduction.SUM_OVER_BATCH_SIZE
SUM = tf.losses.Reduction.SUM
except ValueError:
SUM_OVER_BATCH_SIZE = "sum_over_batch_size"
SUM = "sum"
def tensor_name(tensor):
"""Returns the Tensor's name.
Tensor names always have the structure <op_name>:<int>. This method
returns the portion before the ':'.
Args:
tensor: Tensor.
Returns:
String name of the Tensor.
"""
return tensor.name.split(":")[-2]
def version_greater_or_equal(semver):
"""Returns whether the current TF version is >= to semver string."""
try:
tf_version = tf.version.VERSION
except AttributeError:
tf_version = tf.VERSION
return LooseVersion(tf_version) >= LooseVersion(semver)
def make_one_shot_iterator(dataset):
"""Returns a dataset's one-shot iterator."""
try:
return v1.data.make_one_shot_iterator(dataset)
except AttributeError:
return dataset.make_one_shot_iterator()
def random_normal(*args, **kwargs):
"""Returns a random normal distribution Tensor."""
try:
return tf.random.normal(*args, **kwargs)
except AttributeError:
return tf.random_normal(*args, **kwargs)
def metric_op(metric):
"""Converts Keras metrics into a metric op tuple.
  NOTE: If this method is called in a for loop, the runtime is O(n^2). However
the number of eval metrics at any given time should be small enough that
this does not affect performance. Any impact is only during graph construction
time, and therefore has no effect on steps/s.
Args:
metric: Either a `tf.keras.metric.Metric` instance or a tuple of Tensor
value and update op.
Returns:
A tuple of metric Tensor value and update op.
"""
if not isinstance(metric, tf.keras.metrics.Metric):
return metric
vars_to_add = {}
for var in metric.variables:
vars_to_add[_hashable_var_key(var)] = var
metric = (metric.result(), metric.updates[0])
_update_variable_collection(v1.GraphKeys.LOCAL_VARIABLES, vars_to_add)
_update_variable_collection(v1.GraphKeys.METRIC_VARIABLES, vars_to_add)
return metric
def _hashable_var_key(var):
"""Returns a hashable key to identify the given Variable."""
# In TF 2, Variables themselves are not hashable, so cannot be dict keys.
# Error is "Tensor is unhashable if Tensor equality is enabled. Instead, use
# tensor.experimental_ref() as the key". For a related issue, see:
# https://github.com/tensorflow/tensorflow/issues/32139
ref_op = getattr(var, "experimental_ref", None)
if callable(ref_op):
return ref_op()
return var
def _update_variable_collection(collection_name, vars_to_add):
"""Add variables to collection."""
collection = {}
for var in v1.get_collection(collection_name):
collection[_hashable_var_key(var)] = var
# Skip variables that are in the collection already: O(n) runtime.
for var_ref in vars_to_add:
if var_ref in collection:
continue
v1.add_to_collection(collection_name, vars_to_add[var_ref])
def skip_for_tf2(f):
"""Decorator that skips tests when using TensorFlow 2."""
def test_wrapper(*args, **kwargs):
"""Wraps the decorated function to determine whether to skip."""
# Extract test case instance from args.
self = args[0]
try:
# If tf.contrib doesn't exist, we are in TF 2.0.
_ = tf.contrib
_ = tf.contrib.estimator.regression_head(
loss_reduction=tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE)
except (AttributeError, ImportError):
self.skipTest("Skipping test in TF 2.0.")
return f(*args, **kwargs)
return test_wrapper
def skip_for_tf1(f):
"""Decorator that skips tests when using TensorFlow 1."""
def test_wrapper(*args, **kwargs):
"""Wraps the decorated function to determine whether to skip."""
# Extract test case instance from args.
self = args[0]
try:
# If tf.contrib doesn't exist, we are in TF 2.0.
_ = tf_v2.contrib
except (AttributeError, ImportError):
return f(*args, **kwargs)
self.skipTest("Skipping test in TF 1.0.")
return f(*args, **kwargs)
return test_wrapper
def is_v2_behavior_enabled():
"""Returns if user called tf.enable_v2_behavior."""
# Since there is no actual tf.is_v2_behavior enabled, check that the
# settings were enabled.
return tf2.enabled()
def load_variable(checkpoint_path, var_name, shape, dtype):
"""Loads a variable from a given checkpoint."""
with tf.Graph().as_default():
variable = v1.get_variable(
var_name,
shape=shape,
dtype=dtype,
initializer=v1.zeros_initializer(),
trainable=False)
trackable_vars = {var_name: variable}
checkpoint = v2.train.Checkpoint(**trackable_vars)
status = checkpoint.restore(checkpoint_path)
status.expect_partial()
with v1.Session() as session:
status.initialize_or_restore(session)
return session.run(variable)
| 6,952 | 29.362445 | 80 | py |
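A short, illustrative sketch of how the compatibility helpers above are typically used; nothing here is prescribed by the module itself:

import tensorflow.compat.v1 as tf
from adanet import tf_compat

# Pick the right one-shot iterator API regardless of TF major version.
dataset = tf.data.Dataset.from_tensor_slices([1.0, 2.0, 3.0]).batch(2)
iterator = tf_compat.make_one_shot_iterator(dataset)

# Gate version-specific behavior on the installed TensorFlow release.
if tf_compat.version_greater_or_equal("2.0.0"):
  pass  # TF 2.x-only code path.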
DsGammaAnalysis | DsGammaAnalysis-main/ml_tool/ml_tool/__main__.py | import sys
import traceback
import argparse
from datetime import datetime
from pathlib import Path
from .dataset import DataSet, BackgroundMode
def parse_arguments(args) -> argparse.Namespace:
parser = argparse.ArgumentParser(prog='ml_tool')
subparsers = parser.add_subparsers(help='This tool has several modes.', dest="subtool")
train_parser = subparsers.add_parser("train", help="Train ML models")
train_parser.add_argument("-d", "--data-directory", type=str, default="data", help="Where to load data files from")
train_parser.add_argument("-m", "--model-directory", type=str, default="models", help="Where to store model files")
train_parser.add_argument("-j", "--config-file", type=str, default="default_model.json", help="json file with config options")
##plot...
plot_parser = subparsers.add_parser("plot", help="Plot ML models")
plot_parser.add_argument("-d", "--data-directory", type=str, default="data", help="Where to load data files from")
plot_parser.add_argument("-m", "--model-directory", type=str, default="models", help="Where to load model files")
plot_parser.add_argument("-p", "--plot-directory", type=str, default="plots", help="Where to store plot images")
plot_parser.add_argument("-i", "--images", action="store_true", default=False, help="Run with convolutional images.")
group2 = plot_parser.add_mutually_exclusive_group()
group2.add_argument("--test-qq", action='store_true', help="Test on qq only")
group2.add_argument("--test-gg", action='store_true', help="Test on gg only")
##compariplot...
compariplot = subparsers.add_parser("compariplot", help="Make an overview of models as a seaborn swarmplot")
compariplot.add_argument("-m", "--model-directory", type=str, default="models", help="Where to load model files")
compariplot.add_argument("-p", "--plot-directory", type=str, default="plots", help="Where to store plot images")
compariplot.add_argument("-r", "--range", type=float, nargs=2, default=None, help="Y-axis range")
compariplot.add_argument("-c", "--constraint", action='append', type=str, nargs=2, help="constraints on variables")
compariplot.add_argument("category", type=str, help="Category for the X axis")
compariplot.add_argument("variable", type=str, help="Variable out of metadata which to put on the Y axis")
compariplot.add_argument("-o", "--color-category", type=str, help="colour of points category")
compariplot.add_argument("-f", "--filename", type=str, default="", help="output plot filename")
compariplot.add_argument("-s", "--markersize", type=float, default=3, help="markersize")
##tabulate
tabulate_parser = subparsers.add_parser("tabulate", help="Tabulate ML models")
tabulate_parser.add_argument("-m", "--model-directory", type=str, default="models", help="Where to load model files")
tabulate_parser.add_argument("variable", type=str, help="Variable name")
##correlate
correlate_parser = subparsers.add_parser("correlate", help="Correlate 2 ML models")
correlate_parser.add_argument("-d", "--data-directory", type=str, default="data", help="Where to load data files from")
correlate_parser.add_argument("-m1", "--model1", type=str, help="Model 1")
correlate_parser.add_argument("-m2", "--model2", type=str, help="Model 2")
##reevaluate
reevaluate_parser = subparsers.add_parser("reevaluate", help="Re-evaluate ML models")
reevaluate_parser.add_argument("-d", "--data-directory", type=str, default="data", help="Where to load data files from")
reevaluate_parser.add_argument("-m", "--model", type=str, help="Model")
group3 = reevaluate_parser.add_mutually_exclusive_group()
group3.add_argument("--train-qq", action='store_true', help="Train on qq only")
group3.add_argument("--train-gg", action='store_true', help="Train on gg only")
group4 = reevaluate_parser.add_mutually_exclusive_group()
group4.add_argument("--test-qq", action='store_true', help="Test on qq only")
group4.add_argument("--test-gg", action='store_true', help="Test on gg only")
return parser, parser.parse_args(args)
def command(args):
parser, arguments = parse_arguments(args)
if not arguments:
parser.print_help()
return 1
##train models
if arguments.subtool == 'train':
from .trainer import train
from .config import get_configs
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
dataset = DataSet(arguments.data_directory, BackgroundMode.Mixed, BackgroundMode.Mixed)
start = datetime.now()
num_runs = sum(1 for x in get_configs(arguments.config_file))
##get config file:
i = 0
iterable = iter(get_configs(arguments.config_file))
config = next(iterable)
while True:
try:
while True:
train_mode = BackgroundMode.Mixed
if config['train_qq']:
train_mode = BackgroundMode.QQOnly
elif config['train_gg']:
train_mode = BackgroundMode.GGOnly
test_mode = BackgroundMode.Mixed
if config['test_qq']:
test_mode = BackgroundMode.QQOnly
elif config['test_gg']:
test_mode = BackgroundMode.GGOnly
keys = DataSet.nominal_keys.copy()
if config['run_options'] == 'conv_only' or config['run_options'] == 'combi':
keys.append('jet_image')
dataset.train_mode = train_mode
dataset.test_mode = test_mode
dataset.reset_keys(keys)
try:
train(dataset, arguments.model_directory, config)
except KeyboardInterrupt as e:
raise e
except:
with open(f"{arguments.model_directory}/{i}.log", "w") as f:
f.write(traceback.format_exc())
print(f"Model {i} failed to train, exception logged to file {arguments.model_directory}/{i}.log")
# Time projection
now = datetime.now()
duration = now - start
total_duration = duration / (i + 1) * num_runs
left_duration = total_duration - duration
finished = now + left_duration
print(f"{i+1}/{num_runs} done, time elapsed: {duration}, estimated time left: {left_duration}, projected finish by {finished}. ")
i += 1
config = next(iterable)
except KeyboardInterrupt:
del dataset
del train
import gc
gc.collect()
tf.keras.backend.clear_session()
print("Pausing, do you wish to continue? [y/n].")
pauset = datetime.now()
while True:
a = input(':')
if a == 'n':
sys.exit(0)
if a == 'y':
break
from .trainer import train
dataset = DataSet(arguments.data_directory, BackgroundMode.Mixed, BackgroundMode.Mixed)
start -= datetime.now() - pauset
## make table
if arguments.subtool == 'tabulate':
from .tabulate import tabulate
tabulate(arguments.model_directory, arguments.variable)
## correlate
if arguments.subtool == 'correlate':
from .correlate import correlate
dataset = DataSet(arguments.data_directory, BackgroundMode.Mixed, BackgroundMode.Mixed)
correlate(Path(arguments.model1), Path(arguments.model2), dataset)
##plot models
if arguments.subtool == 'plot':
from .plotter import plot
train_mode = BackgroundMode.Mixed
test_mode = BackgroundMode.Mixed
if arguments.test_qq:
test_mode = BackgroundMode.QQOnly
elif arguments.test_gg:
test_mode = BackgroundMode.GGOnly
dataset = DataSet(arguments.data_directory, train_mode, test_mode)
modeldir = Path(arguments.model_directory).resolve()
plotdir = Path(arguments.plot_directory).resolve()
plot(modeldir, plotdir, dataset)
##compariplot models
if arguments.subtool == "compariplot":
from .compariplot import compariplot
compariplot(
arguments.model_directory,
arguments.plot_directory,
arguments.range,
arguments.constraint,
arguments.category,
arguments.variable,
arguments.color_category,
arguments.filename,
arguments.markersize
)
##reevaluate models
if arguments.subtool == 'reevaluate':
from .reevaluate import reevaluate
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
train_mode = BackgroundMode.Mixed
if arguments.train_qq:
train_mode = BackgroundMode.QQOnly
elif arguments.train_gg:
train_mode = BackgroundMode.GGOnly
test_mode = BackgroundMode.Mixed
if arguments.test_qq:
test_mode = BackgroundMode.QQOnly
elif arguments.test_gg:
test_mode = BackgroundMode.GGOnly
dataset = DataSet(arguments.data_directory, train_mode, test_mode)
reevaluate(Path(arguments.model), dataset)
def main():
return command(sys.argv[1:])
if __name__ == "__main__":
main()
| 9,864 | 43.638009 | 149 | py |
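Since the entry point above is driven entirely by argv, it can be exercised from a shell via `python -m ml_tool ...` or programmatically; the paths below are placeholders, assuming the package is importable as ml_tool:

from ml_tool.__main__ import command

# Equivalent to: python -m ml_tool train -d data -m models -j default_model.json
command(["train", "-d", "data", "-m", "models", "-j", "default_model.json"])

# Plot all trained models against the mixed background test set.
command(["plot", "-d", "data", "-m", "models", "-p", "plots"])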
DsGammaAnalysis | DsGammaAnalysis-main/ml_tool/ml_tool/designer.py | from tensorflow.keras import layers
from tensorflow.keras import models
import tensorflow as tf
from tensorflow import keras
from .model import Model
from .config import *
## Here define models:
# dense models
def create_dense_layers(config):
return_layers = []
return_layers.append(layers.Dense(config['layer1_nodes'], activation=config['layer1_activation']))
if config['layer1_dropout']:
return_layers.append(layers.Dropout(config['layer1_dropout_nodes']))
if config['layer2']:
return_layers.append(layers.Dense(config['layer2_nodes'], activation=config['layer2_activation']))
if config['layer2_dropout']:
return_layers.append(layers.Dropout(config['layer2_dropout_nodes']))
if config['layer3']:
return_layers.append(layers.Dense(config['layer3_nodes'], activation=config['layer3_activation']))
if config['layer3_dropout']:
return_layers.append(layers.Dropout(config['layer3_dropout_nodes']))
if config['run_options'] == "dense_only":
return_layers.append(layers.Dense(1, activation=config['layer_output_activation']))
return return_layers
# This is needed to create a model from the layers above.
def create_model(name, prepped_layers, input_size):
all_layers = [layers.InputLayer(input_shape=(input_size,))] + prepped_layers
model = keras.Sequential(all_layers)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
return Model(model, name=name, metadata={})
# Convolutional only
def create_conv_layers(config):
return_layers = []
param1 = config['conv_layer1_nodes']
return_layers.append(layers.Conv2D(param1[0], (param1[1], param1[2]), activation = config['conv_layer1_activation'], padding="same"))
if config['conv_layer1_maxpooling']:
return_layers.append(layers.MaxPooling2D())
if config['conv_layer2']:
param2 = config['conv_layer2_nodes']
return_layers.append(layers.Conv2D(param2[0], (param2[1], param2[2]), activation = config['conv_layer2_activation'], padding="same"))
if config['conv_layer2_maxpooling']:
return_layers.append(layers.MaxPooling2D())
if config['conv_layer3']:
param3 = config['conv_layer3_nodes']
return_layers.append(layers.Conv2D(param3[0], (param3[1], param3[2]), activation = config['conv_layer3_activation'], padding="same"))
if config['conv_layer3_maxpooling']:
return_layers.append(layers.MaxPooling2D())
return_layers.append(layers.Flatten())
    # Dense layers to finish the convolutional model:
if config['conv_dense']:
return_layers.append(layers.Dense(config['conv_denselayer_nodes'], activation=config['conv_denselayer_activation']))
if config['run_options'] == 'conv_only':
return_layers.append(layers.Dense(1, config['conv_output_activation']))
return return_layers
# This is needed to create a model from the layers above.
def create_conv_model(name, prepped_layers, conv_input_shape):
all_layers = [layers.InputLayer(input_shape=conv_input_shape)] + prepped_layers
model = keras.Sequential(all_layers)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
return Model(model, name=name, metadata={})
# convolutional + dense
def create_conv_plus_dense_model(config, dense_input_shape, conv_input_shape, dense_layers, conv_layers):
#dense layers
final_dense_layers = [layers.InputLayer(input_shape=dense_input_shape)] + dense_layers
dense = keras.Sequential(final_dense_layers)
#convolutional layers
final_conv_layers = [layers.InputLayer(input_shape=conv_input_shape)] + conv_layers
conv = keras.Sequential(final_conv_layers)
combined = layers.concatenate((dense.output, conv.output))
x = layers.Dense(config['comb_denselayer_nodes'], activation=config['comb_denselayer_activation'])(combined)
x = layers.Dense(1, activation=config['comb_output_activation'])(x)
model = models.Model(inputs=[dense.input, conv.input], outputs=x)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
return Model(model, name=config['model_name'], metadata={})
| 4,161 | 46.83908 | 141 | py |
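A hedged sketch of the dense-only path through the builders above; the config dictionary is a hand-written stand-in for whatever get_configs yields and only uses keys that create_dense_layers actually reads:

from ml_tool.designer import create_dense_layers, create_model

config = {
    "run_options": "dense_only",
    "layer1_nodes": 64, "layer1_activation": "relu",
    "layer1_dropout": True, "layer1_dropout_nodes": 0.2,
    "layer2": True, "layer2_nodes": 32, "layer2_activation": "relu",
    "layer2_dropout": False,
    "layer3": False,
    "layer_output_activation": "sigmoid",
}

model = create_model("example_dense", create_dense_layers(config), input_size=10)
model.model.summary()  # the returned object wraps the compiled keras.Sequential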
DsGammaAnalysis | DsGammaAnalysis-main/ml_tool/ml_tool/model.py | from dataclasses import dataclass
from typing import Dict, List, Union
from pathlib import Path
import json
from tensorflow import keras
@dataclass
class Model:
model: keras.Model
name: str
metadata: Dict[str, Union[str, int, bool, list]]
def save(self, directory) -> None:
self.model.save(str(Path(directory).resolve() / self.name))
(Path(directory).resolve() / self.name / ".custom.metadata.json").write_text(json.dumps({
"name": self.name,
"metadata": self.metadata
}))
@classmethod
def load(cls, file) -> 'Model':
return cls(
model=keras.models.load_model(str(file)),
**json.loads((file / ".custom.metadata.json").read_text())
)
@classmethod
def load_multi(cls, directory) -> List['Model']:
return [cls.load(file) for file in Path(directory).resolve().glob("*") if file.is_dir() and (file / ".custom.metadata.json").exists()]
@classmethod
def load_multi_meta_only(cls, directory) -> List['Model']:
return [Model(
model=None,
**json.loads((file / ".custom.metadata.json").read_text())
) for file in Path(directory).resolve().glob("*") if file.is_dir() and (file / ".custom.metadata.json").exists()]
| 1,289 | 30.463415 | 143 | py |
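A short round-trip sketch for the wrapper above; the Keras network, name, metadata, and directory are placeholders:

from pathlib import Path
from tensorflow import keras
from ml_tool.model import Model

net = keras.Sequential([keras.layers.Dense(1, input_shape=(10,))])
wrapped = Model(model=net, name="example", metadata={"layer1_nodes": 64})

wrapped.save("models")                    # writes models/example/ plus .custom.metadata.json
restored = Model.load(Path("models/example"))
all_models = Model.load_multi("models")   # every saved model found in the directory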
airflow | airflow-main/airflow/configuration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import functools
import json
import logging
import multiprocessing
import os
import pathlib
import shlex
import stat
import subprocess
import sys
import warnings
from base64 import b64encode
from collections import OrderedDict
# Ignored Mypy on configparser because it thinks the configparser module has no _UNSET attribute
from configparser import _UNSET, ConfigParser, NoOptionError, NoSectionError # type: ignore
from contextlib import contextmanager, suppress
from json.decoder import JSONDecodeError
from typing import IO, Any, Dict, Iterable, Pattern, Set, Tuple, Union
from urllib.parse import urlsplit
import re2
from typing_extensions import overload
from airflow.auth.managers.base_auth_manager import BaseAuthManager
from airflow.exceptions import AirflowConfigException
from airflow.secrets import DEFAULT_SECRETS_SEARCH_PATH, BaseSecretsBackend
from airflow.utils import yaml
from airflow.utils.module_loading import import_string
from airflow.utils.weight_rule import WeightRule
log = logging.getLogger(__name__)
# show Airflow's deprecation warnings
if not sys.warnoptions:
warnings.filterwarnings(action="default", category=DeprecationWarning, module="airflow")
warnings.filterwarnings(action="default", category=PendingDeprecationWarning, module="airflow")
_SQLITE3_VERSION_PATTERN = re2.compile(r"(?P<version>^\d+(?:\.\d+)*)\D?.*$")
ConfigType = Union[str, int, float, bool]
ConfigOptionsDictType = Dict[str, ConfigType]
ConfigSectionSourcesType = Dict[str, Union[str, Tuple[str, str]]]
ConfigSourcesType = Dict[str, ConfigSectionSourcesType]
ENV_VAR_PREFIX = "AIRFLOW__"
def _parse_sqlite_version(s: str) -> tuple[int, ...]:
match = _SQLITE3_VERSION_PATTERN.match(s)
if match is None:
return ()
return tuple(int(p) for p in match.group("version").split("."))
@overload
def expand_env_var(env_var: None) -> None:
...
@overload
def expand_env_var(env_var: str) -> str:
...
def expand_env_var(env_var: str | None) -> str | None:
"""
Expands (potentially nested) env vars.
Repeat and apply `expandvars` and `expanduser` until
interpolation stops having any effect.
"""
if not env_var:
return env_var
while True:
interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
if interpolated == env_var:
return interpolated
else:
env_var = interpolated
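# Illustrative example (assumed environment, not part of the original module):
# with FOO=/opt and BAR=$FOO/data exported, expand_env_var("$BAR/logs") expands to
# "$FOO/data/logs" on the first pass and "/opt/data/logs" on the second; the loop
# stops once a pass leaves the string unchanged.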
def run_command(command: str) -> str:
"""Runs command and returns stdout."""
process = subprocess.Popen(
shlex.split(command), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True
)
output, stderr = (stream.decode(sys.getdefaultencoding(), "ignore") for stream in process.communicate())
if process.returncode != 0:
raise AirflowConfigException(
f"Cannot execute {command}. Error code is: {process.returncode}. "
f"Output: {output}, Stderr: {stderr}"
)
return output
def _get_config_value_from_secret_backend(config_key: str) -> str | None:
"""Get Config option values from Secret Backend."""
try:
secrets_client = get_custom_secret_backend()
if not secrets_client:
return None
return secrets_client.get_config(config_key)
except Exception as e:
raise AirflowConfigException(
"Cannot retrieve config from alternative secrets backend. "
"Make sure it is configured properly and that the Backend "
"is accessible.\n"
f"{e}"
)
def _default_config_file_path(file_name: str) -> str:
templates_dir = os.path.join(os.path.dirname(__file__), "config_templates")
return os.path.join(templates_dir, file_name)
def default_config_yaml() -> dict[str, Any]:
"""
Read Airflow configs from YAML file.
:return: Python dictionary containing configs & their info
"""
with open(_default_config_file_path("config.yml")) as config_file:
return yaml.safe_load(config_file)
class AirflowConfigParser(ConfigParser):
"""Custom Airflow Configparser supporting defaults and deprecated options."""
# These configuration elements can be fetched as the stdout of commands
# following the "{section}__{name}_cmd" pattern, the idea behind this
    # is to not store passwords on boxes in text files.
# These configs can also be fetched from Secrets backend
# following the "{section}__{name}__secret" pattern
@functools.cached_property
def sensitive_config_values(self) -> Set[tuple[str, str]]: # noqa: UP006
default_config = default_config_yaml()
flattened = {
(s, k): item for s, s_c in default_config.items() for k, item in s_c.get("options").items()
}
sensitive = {(section, key) for (section, key), v in flattened.items() if v.get("sensitive") is True}
depr_option = {self.deprecated_options[x][:-1] for x in sensitive if x in self.deprecated_options}
depr_section = {
(self.deprecated_sections[s][0], k) for s, k in sensitive if s in self.deprecated_sections
}
sensitive.update(depr_section, depr_option)
return sensitive
# A mapping of (new section, new option) -> (old section, old option, since_version).
# When reading new option, the old option will be checked to see if it exists. If it does a
# DeprecationWarning will be issued and the old option will be used instead
deprecated_options: dict[tuple[str, str], tuple[str, str, str]] = {
("celery", "worker_precheck"): ("core", "worker_precheck", "2.0.0"),
("logging", "interleave_timestamp_parser"): ("core", "interleave_timestamp_parser", "2.6.1"),
("logging", "base_log_folder"): ("core", "base_log_folder", "2.0.0"),
("logging", "remote_logging"): ("core", "remote_logging", "2.0.0"),
("logging", "remote_log_conn_id"): ("core", "remote_log_conn_id", "2.0.0"),
("logging", "remote_base_log_folder"): ("core", "remote_base_log_folder", "2.0.0"),
("logging", "encrypt_s3_logs"): ("core", "encrypt_s3_logs", "2.0.0"),
("logging", "logging_level"): ("core", "logging_level", "2.0.0"),
("logging", "fab_logging_level"): ("core", "fab_logging_level", "2.0.0"),
("logging", "logging_config_class"): ("core", "logging_config_class", "2.0.0"),
("logging", "colored_console_log"): ("core", "colored_console_log", "2.0.0"),
("logging", "colored_log_format"): ("core", "colored_log_format", "2.0.0"),
("logging", "colored_formatter_class"): ("core", "colored_formatter_class", "2.0.0"),
("logging", "log_format"): ("core", "log_format", "2.0.0"),
("logging", "simple_log_format"): ("core", "simple_log_format", "2.0.0"),
("logging", "task_log_prefix_template"): ("core", "task_log_prefix_template", "2.0.0"),
("logging", "log_filename_template"): ("core", "log_filename_template", "2.0.0"),
("logging", "log_processor_filename_template"): ("core", "log_processor_filename_template", "2.0.0"),
("logging", "dag_processor_manager_log_location"): (
"core",
"dag_processor_manager_log_location",
"2.0.0",
),
("logging", "task_log_reader"): ("core", "task_log_reader", "2.0.0"),
("metrics", "metrics_allow_list"): ("metrics", "statsd_allow_list", "2.6.0"),
("metrics", "metrics_block_list"): ("metrics", "statsd_block_list", "2.6.0"),
("metrics", "statsd_on"): ("scheduler", "statsd_on", "2.0.0"),
("metrics", "statsd_host"): ("scheduler", "statsd_host", "2.0.0"),
("metrics", "statsd_port"): ("scheduler", "statsd_port", "2.0.0"),
("metrics", "statsd_prefix"): ("scheduler", "statsd_prefix", "2.0.0"),
("metrics", "statsd_allow_list"): ("scheduler", "statsd_allow_list", "2.0.0"),
("metrics", "stat_name_handler"): ("scheduler", "stat_name_handler", "2.0.0"),
("metrics", "statsd_datadog_enabled"): ("scheduler", "statsd_datadog_enabled", "2.0.0"),
("metrics", "statsd_datadog_tags"): ("scheduler", "statsd_datadog_tags", "2.0.0"),
("metrics", "statsd_datadog_metrics_tags"): ("scheduler", "statsd_datadog_metrics_tags", "2.6.0"),
("metrics", "statsd_custom_client_path"): ("scheduler", "statsd_custom_client_path", "2.0.0"),
("scheduler", "parsing_processes"): ("scheduler", "max_threads", "1.10.14"),
("scheduler", "scheduler_idle_sleep_time"): ("scheduler", "processor_poll_interval", "2.2.0"),
("operators", "default_queue"): ("celery", "default_queue", "2.1.0"),
("core", "hide_sensitive_var_conn_fields"): ("admin", "hide_sensitive_variable_fields", "2.1.0"),
("core", "sensitive_var_conn_names"): ("admin", "sensitive_variable_fields", "2.1.0"),
("core", "default_pool_task_slot_count"): ("core", "non_pooled_task_slot_count", "1.10.4"),
("core", "max_active_tasks_per_dag"): ("core", "dag_concurrency", "2.2.0"),
("logging", "worker_log_server_port"): ("celery", "worker_log_server_port", "2.2.0"),
("api", "access_control_allow_origins"): ("api", "access_control_allow_origin", "2.2.0"),
("api", "auth_backends"): ("api", "auth_backend", "2.3.0"),
("database", "sql_alchemy_conn"): ("core", "sql_alchemy_conn", "2.3.0"),
("database", "sql_engine_encoding"): ("core", "sql_engine_encoding", "2.3.0"),
("database", "sql_engine_collation_for_ids"): ("core", "sql_engine_collation_for_ids", "2.3.0"),
("database", "sql_alchemy_pool_enabled"): ("core", "sql_alchemy_pool_enabled", "2.3.0"),
("database", "sql_alchemy_pool_size"): ("core", "sql_alchemy_pool_size", "2.3.0"),
("database", "sql_alchemy_max_overflow"): ("core", "sql_alchemy_max_overflow", "2.3.0"),
("database", "sql_alchemy_pool_recycle"): ("core", "sql_alchemy_pool_recycle", "2.3.0"),
("database", "sql_alchemy_pool_pre_ping"): ("core", "sql_alchemy_pool_pre_ping", "2.3.0"),
("database", "sql_alchemy_schema"): ("core", "sql_alchemy_schema", "2.3.0"),
("database", "sql_alchemy_connect_args"): ("core", "sql_alchemy_connect_args", "2.3.0"),
("database", "load_default_connections"): ("core", "load_default_connections", "2.3.0"),
("database", "max_db_retries"): ("core", "max_db_retries", "2.3.0"),
("scheduler", "parsing_cleanup_interval"): ("scheduler", "deactivate_stale_dags_interval", "2.5.0"),
("scheduler", "task_queued_timeout_check_interval"): (
"kubernetes_executor",
"worker_pods_pending_timeout_check_interval",
"2.6.0",
),
}
# A mapping of new configurations to a list of old configurations for when one configuration
# deprecates more than one other deprecation. The deprecation logic for these configurations
# is defined in SchedulerJobRunner.
many_to_one_deprecated_options: dict[tuple[str, str], list[tuple[str, str, str]]] = {
("scheduler", "task_queued_timeout"): [
("celery", "stalled_task_timeout", "2.6.0"),
("celery", "task_adoption_timeout", "2.6.0"),
("kubernetes_executor", "worker_pods_pending_timeout", "2.6.0"),
]
}
# A mapping of new section -> (old section, since_version).
deprecated_sections: dict[str, tuple[str, str]] = {"kubernetes_executor": ("kubernetes", "2.5.0")}
# Now build the inverse so we can go from old_section/old_key to new_section/new_key
# if someone tries to retrieve it based on old_section/old_key
@functools.cached_property
def inversed_deprecated_options(self):
return {(sec, name): key for key, (sec, name, ver) in self.deprecated_options.items()}
@functools.cached_property
def inversed_deprecated_sections(self):
return {
old_section: new_section for new_section, (old_section, ver) in self.deprecated_sections.items()
}
# A mapping of old default values that we want to change and warn the user
# about. Mapping of section -> setting -> { old, replace, by_version }
deprecated_values: dict[str, dict[str, tuple[Pattern, str, str]]] = {
"core": {
"hostname_callable": (re2.compile(r":"), r".", "2.1"),
},
"webserver": {
"navbar_color": (re2.compile(r"(?i)\A#007A87\z"), "#fff", "2.1"),
"dag_default_view": (re2.compile(r"^tree$"), "grid", "3.0"),
},
"email": {
"email_backend": (
re2.compile(r"^airflow\.contrib\.utils\.sendgrid\.send_email$"),
r"airflow.providers.sendgrid.utils.emailer.send_email",
"2.1",
),
},
"logging": {
"log_filename_template": (
re2.compile(re2.escape("{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log")),
"XX-set-after-default-config-loaded-XX",
"3.0",
),
},
"api": {
"auth_backends": (
re2.compile(r"^airflow\.api\.auth\.backend\.deny_all$|^$"),
"airflow.api.auth.backend.session",
"3.0",
),
},
"elasticsearch": {
"log_id_template": (
re2.compile("^" + re2.escape("{dag_id}-{task_id}-{execution_date}-{try_number}") + "$"),
"{dag_id}-{task_id}-{run_id}-{map_index}-{try_number}",
"3.0",
)
},
}
_available_logging_levels = ["CRITICAL", "FATAL", "ERROR", "WARN", "WARNING", "INFO", "DEBUG"]
enums_options = {
("core", "default_task_weight_rule"): sorted(WeightRule.all_weight_rules()),
("core", "dag_ignore_file_syntax"): ["regexp", "glob"],
("core", "mp_start_method"): multiprocessing.get_all_start_methods(),
("scheduler", "file_parsing_sort_mode"): ["modified_time", "random_seeded_by_host", "alphabetical"],
("logging", "logging_level"): _available_logging_levels,
("logging", "fab_logging_level"): _available_logging_levels,
# celery_logging_level can be empty, which uses logging_level as fallback
("logging", "celery_logging_level"): _available_logging_levels + [""],
("webserver", "analytical_tool"): ["google_analytics", "metarouter", "segment", ""],
}
upgraded_values: dict[tuple[str, str], str]
"""Mapping of (section,option) to the old value that was upgraded"""
# This method transforms option names on every read, get, or set operation.
# This changes from the default behaviour of ConfigParser from lower-casing
# to instead be case-preserving
def optionxform(self, optionstr: str) -> str:
return optionstr
def __init__(self, default_config: str | None = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.upgraded_values = {}
self.airflow_defaults = ConfigParser(*args, **kwargs)
if default_config is not None:
self.airflow_defaults.read_string(default_config)
# Set the upgrade value based on the current loaded default
default = self.airflow_defaults.get("logging", "log_filename_template", fallback=None)
if default:
replacement = self.deprecated_values["logging"]["log_filename_template"]
self.deprecated_values["logging"]["log_filename_template"] = (
replacement[0],
default,
replacement[2],
)
else:
# In case of tests it might not exist
with suppress(KeyError):
del self.deprecated_values["logging"]["log_filename_template"]
else:
with suppress(KeyError):
del self.deprecated_values["logging"]["log_filename_template"]
self.is_validated = False
self._suppress_future_warnings = False
def validate(self):
self._validate_sqlite3_version()
self._validate_enums()
for section, replacement in self.deprecated_values.items():
for name, info in replacement.items():
old, new, version = info
current_value = self.get(section, name, fallback="")
if self._using_old_value(old, current_value):
self.upgraded_values[(section, name)] = current_value
new_value = old.sub(new, current_value)
self._update_env_var(section=section, name=name, new_value=new_value)
self._create_future_warning(
name=name,
section=section,
current_value=current_value,
new_value=new_value,
version=version,
)
self._upgrade_auth_backends()
self._upgrade_postgres_metastore_conn()
self.is_validated = True
def _upgrade_auth_backends(self):
"""
Ensure a custom auth_backends setting contains session.
This is required by the UI for ajax queries.
"""
old_value = self.get("api", "auth_backends", fallback="")
if old_value in ("airflow.api.auth.backend.default", ""):
# handled by deprecated_values
pass
elif old_value.find("airflow.api.auth.backend.session") == -1:
new_value = old_value + ",airflow.api.auth.backend.session"
self._update_env_var(section="api", name="auth_backends", new_value=new_value)
self.upgraded_values[("api", "auth_backends")] = old_value
# if the old value is set via env var, we need to wipe it
# otherwise, it'll "win" over our adjusted value
old_env_var = self._env_var_name("api", "auth_backend")
os.environ.pop(old_env_var, None)
warnings.warn(
"The auth_backends setting in [api] has had airflow.api.auth.backend.session added "
"in the running config, which is needed by the UI. Please update your config before "
"Apache Airflow 3.0.",
FutureWarning,
)
def _upgrade_postgres_metastore_conn(self):
"""
Upgrade SQL schemas.
As of SQLAlchemy 1.4, schemes `postgres+psycopg2` and `postgres`
must be replaced with `postgresql`.
"""
section, key = "database", "sql_alchemy_conn"
old_value = self.get(section, key, _extra_stacklevel=1)
bad_schemes = ["postgres+psycopg2", "postgres"]
good_scheme = "postgresql"
parsed = urlsplit(old_value)
if parsed.scheme in bad_schemes:
warnings.warn(
f"Bad scheme in Airflow configuration core > sql_alchemy_conn: `{parsed.scheme}`. "
"As of SQLAlchemy 1.4 (adopted in Airflow 2.3) this is no longer supported. You must "
f"change to `{good_scheme}` before the next Airflow release.",
FutureWarning,
)
self.upgraded_values[(section, key)] = old_value
new_value = re2.sub("^" + re2.escape(f"{parsed.scheme}://"), f"{good_scheme}://", old_value)
self._update_env_var(section=section, name=key, new_value=new_value)
# if the old value is set via env var, we need to wipe it
# otherwise, it'll "win" over our adjusted value
old_env_var = self._env_var_name("core", key)
os.environ.pop(old_env_var, None)
def _validate_enums(self):
"""Validate that enum type config has an accepted value."""
for (section_key, option_key), enum_options in self.enums_options.items():
if self.has_option(section_key, option_key):
value = self.get(section_key, option_key)
if value not in enum_options:
raise AirflowConfigException(
f"`[{section_key}] {option_key}` should not be "
f"{value!r}. Possible values: {', '.join(enum_options)}."
)
def _validate_sqlite3_version(self):
"""Validate SQLite version.
Some features in storing rendered fields require SQLite >= 3.15.0.
"""
if "sqlite" not in self.get("database", "sql_alchemy_conn"):
return
import sqlite3
min_sqlite_version = (3, 15, 0)
if _parse_sqlite_version(sqlite3.sqlite_version) >= min_sqlite_version:
return
from airflow.utils.docs import get_docs_url
min_sqlite_version_str = ".".join(str(s) for s in min_sqlite_version)
raise AirflowConfigException(
f"error: SQLite C library too old (< {min_sqlite_version_str}). "
f"See {get_docs_url('howto/set-up-database.html#setting-up-a-sqlite-database')}"
)
def _using_old_value(self, old: Pattern, current_value: str) -> bool:
return old.search(current_value) is not None
def _update_env_var(self, section: str, name: str, new_value: str):
env_var = self._env_var_name(section, name)
# Set it as an env var so that any subprocesses keep the same override!
os.environ[env_var] = new_value
@staticmethod
def _create_future_warning(name: str, section: str, current_value: Any, new_value: Any, version: str):
warnings.warn(
f"The {name!r} setting in [{section}] has the old default value of {current_value!r}. "
f"This value has been changed to {new_value!r} in the running config, but "
f"please update your config before Apache Airflow {version}.",
FutureWarning,
)
def _env_var_name(self, section: str, key: str) -> str:
return f"{ENV_VAR_PREFIX}{section.replace('.', '_').upper()}__{key.upper()}"
def _get_env_var_option(self, section: str, key: str):
# must have format AIRFLOW__{SECTION}__{KEY} (note double underscore)
env_var = self._env_var_name(section, key)
if env_var in os.environ:
return expand_env_var(os.environ[env_var])
# alternatively AIRFLOW__{SECTION}__{KEY}_CMD (for a command)
env_var_cmd = env_var + "_CMD"
if env_var_cmd in os.environ:
# if this is a valid command key...
if (section, key) in self.sensitive_config_values:
return run_command(os.environ[env_var_cmd])
# alternatively AIRFLOW__{SECTION}__{KEY}_SECRET (to get from Secrets Backend)
env_var_secret_path = env_var + "_SECRET"
if env_var_secret_path in os.environ:
# if this is a valid secret path...
if (section, key) in self.sensitive_config_values:
return _get_config_value_from_secret_backend(os.environ[env_var_secret_path])
return None
def _get_cmd_option(self, section: str, key: str):
fallback_key = key + "_cmd"
if (section, key) in self.sensitive_config_values:
if super().has_option(section, fallback_key):
command = super().get(section, fallback_key)
return run_command(command)
return None
def _get_cmd_option_from_config_sources(
self, config_sources: ConfigSourcesType, section: str, key: str
) -> str | None:
fallback_key = key + "_cmd"
if (section, key) in self.sensitive_config_values:
section_dict = config_sources.get(section)
if section_dict is not None:
command_value = section_dict.get(fallback_key)
if command_value is not None:
if isinstance(command_value, str):
command = command_value
else:
command = command_value[0]
return run_command(command)
return None
def _get_secret_option(self, section: str, key: str) -> str | None:
"""Get Config option values from Secret Backend."""
fallback_key = key + "_secret"
if (section, key) in self.sensitive_config_values:
if super().has_option(section, fallback_key):
secrets_path = super().get(section, fallback_key)
return _get_config_value_from_secret_backend(secrets_path)
return None
def _get_secret_option_from_config_sources(
self, config_sources: ConfigSourcesType, section: str, key: str
) -> str | None:
fallback_key = key + "_secret"
if (section, key) in self.sensitive_config_values:
section_dict = config_sources.get(section)
if section_dict is not None:
secrets_path_value = section_dict.get(fallback_key)
if secrets_path_value is not None:
if isinstance(secrets_path_value, str):
secrets_path = secrets_path_value
else:
secrets_path = secrets_path_value[0]
return _get_config_value_from_secret_backend(secrets_path)
return None
def get_mandatory_value(self, section: str, key: str, **kwargs) -> str:
value = self.get(section, key, _extra_stacklevel=1, **kwargs)
if value is None:
raise ValueError(f"The value {section}/{key} should be set!")
return value
@overload # type: ignore[override]
def get(self, section: str, key: str, fallback: str = ..., **kwargs) -> str: # type: ignore[override]
...
@overload # type: ignore[override]
def get(self, section: str, key: str, **kwargs) -> str | None: # type: ignore[override]
...
def get( # type: ignore[override, misc]
self,
section: str,
key: str,
_extra_stacklevel: int = 0,
**kwargs,
) -> str | None:
section = str(section).lower()
key = str(key).lower()
warning_emitted = False
deprecated_section: str | None
deprecated_key: str | None
# For when we rename whole sections
if section in self.inversed_deprecated_sections:
deprecated_section, deprecated_key = (section, key)
section = self.inversed_deprecated_sections[section]
if not self._suppress_future_warnings:
warnings.warn(
f"The config section [{deprecated_section}] has been renamed to "
f"[{section}]. Please update your `conf.get*` call to use the new name",
FutureWarning,
stacklevel=2 + _extra_stacklevel,
)
# Don't warn about individual rename if the whole section is renamed
warning_emitted = True
elif (section, key) in self.inversed_deprecated_options:
# Handle using deprecated section/key instead of the new section/key
new_section, new_key = self.inversed_deprecated_options[(section, key)]
if not self._suppress_future_warnings and not warning_emitted:
warnings.warn(
f"section/key [{section}/{key}] has been deprecated, you should use"
f"[{new_section}/{new_key}] instead. Please update your `conf.get*` call to use the "
"new name",
FutureWarning,
stacklevel=2 + _extra_stacklevel,
)
warning_emitted = True
deprecated_section, deprecated_key = section, key
section, key = (new_section, new_key)
elif section in self.deprecated_sections:
# When accessing the new section name, make sure we check under the old config name
deprecated_key = key
deprecated_section = self.deprecated_sections[section][0]
else:
deprecated_section, deprecated_key, _ = self.deprecated_options.get(
(section, key), (None, None, None)
)
# first check environment variables
option = self._get_environment_variables(
deprecated_key,
deprecated_section,
key,
section,
issue_warning=not warning_emitted,
extra_stacklevel=_extra_stacklevel,
)
if option is not None:
return option
# ...then the config file
option = self._get_option_from_config_file(
deprecated_key,
deprecated_section,
key,
kwargs,
section,
issue_warning=not warning_emitted,
extra_stacklevel=_extra_stacklevel,
)
if option is not None:
return option
# ...then commands
option = self._get_option_from_commands(
deprecated_key,
deprecated_section,
key,
section,
issue_warning=not warning_emitted,
extra_stacklevel=_extra_stacklevel,
)
if option is not None:
return option
# ...then from secret backends
option = self._get_option_from_secrets(
deprecated_key,
deprecated_section,
key,
section,
issue_warning=not warning_emitted,
extra_stacklevel=_extra_stacklevel,
)
if option is not None:
return option
# ...then the default config
if self.airflow_defaults.has_option(section, key) or "fallback" in kwargs:
return expand_env_var(self.airflow_defaults.get(section, key, **kwargs))
log.warning("section/key [%s/%s] not found in config", section, key)
raise AirflowConfigException(f"section/key [{section}/{key}] not found in config")
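    # Lookup order illustrated (a sketch, with `[core] parallelism` as an assumed example key):
    # conf.get("core", "parallelism") tries, in order, and returns the first hit from:
    #   1. the AIRFLOW__CORE__PARALLELISM environment variable (sensitive keys also get _CMD/_SECRET variants),
    #   2. the [core] section of airflow.cfg,
    #   3. a parallelism_cmd option (sensitive keys only),
    #   4. a parallelism_secret option (sensitive keys only),
    #   5. the bundled default config, or the `fallback` keyword argument.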
def _get_option_from_secrets(
self,
deprecated_key: str | None,
deprecated_section: str | None,
key: str,
section: str,
issue_warning: bool = True,
extra_stacklevel: int = 0,
) -> str | None:
option = self._get_secret_option(section, key)
if option:
return option
if deprecated_section and deprecated_key:
with self.suppress_future_warnings():
option = self._get_secret_option(deprecated_section, deprecated_key)
if option:
if issue_warning:
self._warn_deprecate(section, key, deprecated_section, deprecated_key, extra_stacklevel)
return option
return None
def _get_option_from_commands(
self,
deprecated_key: str | None,
deprecated_section: str | None,
key: str,
section: str,
issue_warning: bool = True,
extra_stacklevel: int = 0,
) -> str | None:
option = self._get_cmd_option(section, key)
if option:
return option
if deprecated_section and deprecated_key:
with self.suppress_future_warnings():
option = self._get_cmd_option(deprecated_section, deprecated_key)
if option:
if issue_warning:
self._warn_deprecate(section, key, deprecated_section, deprecated_key, extra_stacklevel)
return option
return None
def _get_option_from_config_file(
self,
deprecated_key: str | None,
deprecated_section: str | None,
key: str,
kwargs: dict[str, Any],
section: str,
issue_warning: bool = True,
extra_stacklevel: int = 0,
) -> str | None:
if super().has_option(section, key):
# Use the parent's methods to get the actual config here to be able to
# separate the config from default config.
return expand_env_var(super().get(section, key, **kwargs))
if deprecated_section and deprecated_key:
if super().has_option(deprecated_section, deprecated_key):
if issue_warning:
self._warn_deprecate(section, key, deprecated_section, deprecated_key, extra_stacklevel)
with self.suppress_future_warnings():
return expand_env_var(super().get(deprecated_section, deprecated_key, **kwargs))
return None
def _get_environment_variables(
self,
deprecated_key: str | None,
deprecated_section: str | None,
key: str,
section: str,
issue_warning: bool = True,
extra_stacklevel: int = 0,
) -> str | None:
option = self._get_env_var_option(section, key)
if option is not None:
return option
if deprecated_section and deprecated_key:
with self.suppress_future_warnings():
option = self._get_env_var_option(deprecated_section, deprecated_key)
if option is not None:
if issue_warning:
self._warn_deprecate(section, key, deprecated_section, deprecated_key, extra_stacklevel)
return option
return None
def getboolean(self, section: str, key: str, **kwargs) -> bool: # type: ignore[override]
val = str(self.get(section, key, _extra_stacklevel=1, **kwargs)).lower().strip()
if "#" in val:
val = val.split("#")[0].strip()
if val in ("t", "true", "1"):
return True
elif val in ("f", "false", "0"):
return False
else:
raise AirflowConfigException(
f'Failed to convert value to bool. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
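    # Example (a sketch, assuming a [core] load_examples entry): a config line such as
    #   load_examples = False  # keep the demo DAGs out of production
    # is read as the string "false  # keep ...", the inline "#" part is stripped above, and
    # getboolean("core", "load_examples") returns False; any other token raises.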
def getint(self, section: str, key: str, **kwargs) -> int: # type: ignore[override]
val = self.get(section, key, _extra_stacklevel=1, **kwargs)
if val is None:
raise AirflowConfigException(
f"Failed to convert value None to int. "
f'Please check "{key}" key in "{section}" section is set.'
)
try:
return int(val)
except ValueError:
raise AirflowConfigException(
f'Failed to convert value to int. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
def getfloat(self, section: str, key: str, **kwargs) -> float: # type: ignore[override]
val = self.get(section, key, _extra_stacklevel=1, **kwargs)
if val is None:
raise AirflowConfigException(
f"Failed to convert value None to float. "
f'Please check "{key}" key in "{section}" section is set.'
)
try:
return float(val)
except ValueError:
raise AirflowConfigException(
f'Failed to convert value to float. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
def getimport(self, section: str, key: str, **kwargs) -> Any:
"""
        Reads options, imports the fully qualified name, and returns the object.
        In case of failure, it throws an exception with the key and section names.
:return: The object or None, if the option is empty
"""
full_qualified_path = conf.get(section=section, key=key, **kwargs)
if not full_qualified_path:
return None
try:
return import_string(full_qualified_path)
except ImportError as e:
log.error(e)
raise AirflowConfigException(
f'The object could not be loaded. Please check "{key}" key in "{section}" section. '
f'Current value: "{full_qualified_path}".'
)
def getjson(
self, section: str, key: str, fallback=_UNSET, **kwargs
) -> dict | list | str | int | float | None:
"""
Return a config value parsed from a JSON string.
``fallback`` is *not* JSON parsed but used verbatim when no config value is given.
"""
        # get always returns the fallback value as a string, so if the caller
        # gives us an object here we want to keep it as-is
default = _UNSET
if fallback is not _UNSET:
default = fallback
fallback = _UNSET
try:
data = self.get(section=section, key=key, fallback=fallback, _extra_stacklevel=1, **kwargs)
except (NoSectionError, NoOptionError):
return default
if not data:
return default if default is not _UNSET else None
try:
return json.loads(data)
except JSONDecodeError as e:
raise AirflowConfigException(f"Unable to parse [{section}] {key!r} as valid json") from e
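    # Usage sketch (the [secrets] backend_kwargs value is illustrative):
    #   backend_kwargs = {"connections_prefix": "airflow/connections"}
    #   conf.getjson("secrets", "backend_kwargs", fallback={})
    #   -> {"connections_prefix": "airflow/connections"}
    # A fallback is returned verbatim (not JSON-decoded) when the option is missing or empty.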
def gettimedelta(
self, section: str, key: str, fallback: Any = None, **kwargs
) -> datetime.timedelta | None:
"""
        Gets the config value for the given section/key and converts it into a datetime.timedelta object.
If the key is missing, then it is considered as `None`.
:param section: the section from the config
:param key: the key defined in the given section
:param fallback: fallback value when no config value is given, defaults to None
        :raises AirflowConfigException: raised if the value cannot be parsed as an int (ValueError)
            or is too large for a timedelta (OverflowError)
:return: datetime.timedelta(seconds=<config_value>) or None
"""
val = self.get(section, key, fallback=fallback, _extra_stacklevel=1, **kwargs)
if val:
# the given value must be convertible to integer
try:
int_val = int(val)
except ValueError:
raise AirflowConfigException(
f'Failed to convert value to int. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
try:
return datetime.timedelta(seconds=int_val)
except OverflowError as err:
raise AirflowConfigException(
f"Failed to convert value to timedelta in `seconds`. "
f"{err}. "
f'Please check "{key}" key in "{section}" section. Current value: "{val}".'
)
return fallback
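    # Usage sketch (value invented): with `dag_dir_list_interval = 300` under [scheduler],
    #   conf.gettimedelta("scheduler", "dag_dir_list_interval") -> datetime.timedelta(seconds=300)
    # A non-integer value raises AirflowConfigException; a missing key returns the fallback.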
def read(
self,
filenames: (str | bytes | os.PathLike | Iterable[str | bytes | os.PathLike]),
encoding=None,
):
super().read(filenames=filenames, encoding=encoding)
    # The "Mapping" type from collections.abc used in RawConfigParser's signature is not
    # subscriptable - so we have to use dict here.
def read_dict( # type: ignore[override]
self, dictionary: dict[str, dict[str, Any]], source: str = "<dict>"
):
super().read_dict(dictionary=dictionary, source=source)
def has_option(self, section: str, option: str) -> bool:
try:
# Using self.get() to avoid reimplementing the priority order
# of config variables (env, config, cmd, defaults)
# UNSET to avoid logging a warning about missing values
self.get(section, option, fallback=_UNSET, _extra_stacklevel=1)
return True
except (NoOptionError, NoSectionError):
return False
def remove_option(self, section: str, option: str, remove_default: bool = True):
"""
        Remove an option if it exists in the config read from a file or in the default config.
        If both configs have the same option, this removes the option
        in both configs unless remove_default=False.
"""
if super().has_option(section, option):
super().remove_option(section, option)
if self.airflow_defaults.has_option(section, option) and remove_default:
self.airflow_defaults.remove_option(section, option)
def getsection(self, section: str) -> ConfigOptionsDictType | None:
"""
Returns the section as a dict.
Values are converted to int, float, bool as required.
:param section: section from the config
"""
if not self.has_section(section) and not self.airflow_defaults.has_section(section):
return None
if self.airflow_defaults.has_section(section):
_section: ConfigOptionsDictType = OrderedDict(self.airflow_defaults.items(section))
else:
_section = OrderedDict()
if self.has_section(section):
_section.update(OrderedDict(self.items(section)))
section_prefix = self._env_var_name(section, "")
for env_var in sorted(os.environ.keys()):
if env_var.startswith(section_prefix):
key = env_var.replace(section_prefix, "")
if key.endswith("_CMD"):
key = key[:-4]
key = key.lower()
_section[key] = self._get_env_var_option(section, key)
for key, val in _section.items():
if val is None:
raise AirflowConfigException(
f"Failed to convert value automatically. "
f'Please check "{key}" key in "{section}" section is set.'
)
try:
_section[key] = int(val)
except ValueError:
try:
_section[key] = float(val)
except ValueError:
if isinstance(val, str) and val.lower() in ("t", "true"):
_section[key] = True
elif isinstance(val, str) and val.lower() in ("f", "false"):
_section[key] = False
return _section
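    # Merge order illustrated (values invented for the sketch): defaults for the section are read
    # first, airflow.cfg entries override them, then AIRFLOW__<SECTION>__<KEY> env vars win, and
    # every value is coerced to int, then float, then bool where possible, e.g.
    #   getsection("core") -> {"parallelism": 32, "load_examples": True, "executor": "LocalExecutor", ...}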
def write( # type: ignore[override]
self, fp: IO, space_around_delimiters: bool = True, section: str | None = None
) -> None:
# This is based on the configparser.RawConfigParser.write method code to add support for
# reading options from environment variables.
# Various type ignores below deal with less-than-perfect RawConfigParser superclass typing
if space_around_delimiters:
delimiter = f" {self._delimiters[0]} " # type: ignore[attr-defined]
else:
delimiter = self._delimiters[0] # type: ignore[attr-defined]
if self._defaults: # type: ignore
self._write_section( # type: ignore[attr-defined]
fp, self.default_section, self._defaults.items(), delimiter # type: ignore[attr-defined]
)
sections = (
{section: dict(self.getsection(section))} # type: ignore[arg-type]
if section
else self._sections # type: ignore[attr-defined]
)
for sect in sections:
item_section: ConfigOptionsDictType = self.getsection(sect) # type: ignore[assignment]
self._write_section(fp, sect, item_section.items(), delimiter) # type: ignore[attr-defined]
def as_dict(
self,
display_source: bool = False,
display_sensitive: bool = False,
raw: bool = False,
include_env: bool = True,
include_cmds: bool = True,
include_secret: bool = True,
) -> ConfigSourcesType:
"""
Returns the current configuration as an OrderedDict of OrderedDicts.
        When materializing the current configuration, Airflow defaults are
        materialized along with user-set configs. If any of the `include_*`
        options are False, then the results of calling command or secret key
        configs do not override Airflow defaults and are instead passed through.
        To then prevent Airflow defaults from overwriting user-set command or
        secret key configs, we filter out bare sensitive_config_values that are
        set to Airflow defaults when command or secret key configs produce
        different values.
:param display_source: If False, the option value is returned. If True,
a tuple of (option_value, source) is returned. Source is either
'airflow.cfg', 'default', 'env var', or 'cmd'.
:param display_sensitive: If True, the values of options set by env
vars and bash commands will be displayed. If False, those options
are shown as '< hidden >'
:param raw: Should the values be output as interpolated values, or the
"raw" form that can be fed back in to ConfigParser
:param include_env: Should the value of configuration from AIRFLOW__
environment variables be included or not
:param include_cmds: Should the result of calling any *_cmd config be
set (True, default), or should the _cmd options be left as the
command to run (False)
:param include_secret: Should the result of calling any *_secret config be
set (True, default), or should the _secret options be left as the
path to get the secret from (False)
:return: Dictionary, where the key is the name of the section and the content is
the dictionary with the name of the parameter and its value.
"""
if not display_sensitive:
            # We want to hide the sensitive values in the appropriate methods,
            # since env values coming from cmds and secrets can be read in the _include_envs method
if not all([include_env, include_cmds, include_secret]):
raise ValueError(
"If display_sensitive is false, then include_env, "
"include_cmds, include_secret must all be set as True"
)
config_sources: ConfigSourcesType = {}
configs = [
("default", self.airflow_defaults),
("airflow.cfg", self),
]
self._replace_config_with_display_sources(
config_sources,
configs,
display_source,
raw,
self.deprecated_options,
include_cmds=include_cmds,
include_env=include_env,
include_secret=include_secret,
)
# add env vars and overwrite because they have priority
if include_env:
self._include_envs(config_sources, display_sensitive, display_source, raw)
else:
self._filter_by_source(config_sources, display_source, self._get_env_var_option)
# add bash commands
if include_cmds:
self._include_commands(config_sources, display_sensitive, display_source, raw)
else:
self._filter_by_source(config_sources, display_source, self._get_cmd_option)
# add config from secret backends
if include_secret:
self._include_secrets(config_sources, display_sensitive, display_source, raw)
else:
self._filter_by_source(config_sources, display_source, self._get_secret_option)
if not display_sensitive:
            # This ensures the ones from the config file are hidden too
            # if they are not provided through env, cmd or secret
hidden = "< hidden >"
for section, key in self.sensitive_config_values:
if not config_sources.get(section):
continue
if config_sources[section].get(key, None):
if display_source:
source = config_sources[section][key][1]
config_sources[section][key] = (hidden, source)
else:
config_sources[section][key] = hidden
return config_sources
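    # Illustrative output shape (values invented for the sketch):
    #   as_dict(display_source=True)["core"]["executor"]   -> ("LocalExecutor", "airflow.cfg")
    #   as_dict()["database"]["sql_alchemy_conn"]          -> "< hidden >"  (sensitive key, hidden by default)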
def _include_secrets(
self,
config_sources: ConfigSourcesType,
display_sensitive: bool,
display_source: bool,
raw: bool,
):
for section, key in self.sensitive_config_values:
value: str | None = self._get_secret_option_from_config_sources(config_sources, section, key)
if value:
if not display_sensitive:
value = "< hidden >"
if display_source:
opt: str | tuple[str, str] = (value, "secret")
elif raw:
opt = value.replace("%", "%%")
else:
opt = value
config_sources.setdefault(section, OrderedDict()).update({key: opt})
del config_sources[section][key + "_secret"]
def _include_commands(
self,
config_sources: ConfigSourcesType,
display_sensitive: bool,
display_source: bool,
raw: bool,
):
for section, key in self.sensitive_config_values:
opt = self._get_cmd_option_from_config_sources(config_sources, section, key)
if not opt:
continue
opt_to_set: str | tuple[str, str] | None = opt
if not display_sensitive:
opt_to_set = "< hidden >"
if display_source:
opt_to_set = (str(opt_to_set), "cmd")
elif raw:
opt_to_set = str(opt_to_set).replace("%", "%%")
if opt_to_set is not None:
dict_to_update: dict[str, str | tuple[str, str]] = {key: opt_to_set}
config_sources.setdefault(section, OrderedDict()).update(dict_to_update)
del config_sources[section][key + "_cmd"]
def _include_envs(
self,
config_sources: ConfigSourcesType,
display_sensitive: bool,
display_source: bool,
raw: bool,
):
for env_var in [
os_environment for os_environment in os.environ if os_environment.startswith(ENV_VAR_PREFIX)
]:
try:
_, section, key = env_var.split("__", 2)
opt = self._get_env_var_option(section, key)
except ValueError:
continue
if opt is None:
log.warning("Ignoring unknown env var '%s'", env_var)
continue
if not display_sensitive and env_var != self._env_var_name("core", "unit_test_mode"):
# Don't hide cmd/secret values here
if not env_var.lower().endswith("cmd") and not env_var.lower().endswith("secret"):
if (section, key) in self.sensitive_config_values:
opt = "< hidden >"
elif raw:
opt = opt.replace("%", "%%")
if display_source:
opt = (opt, "env var")
section = section.lower()
            # if we lowercase the key for the kubernetes_environment_variables section,
            # then we won't be able to set any Airflow environment
            # variables. Airflow only parses environment variables that start
            # with AIRFLOW_. Therefore, we need to make it a special case.
if section != "kubernetes_environment_variables":
key = key.lower()
config_sources.setdefault(section, OrderedDict()).update({key: opt})
def _filter_by_source(
self,
config_sources: ConfigSourcesType,
display_source: bool,
getter_func,
):
"""
        Deletes default configs from the current configuration (an OrderedDict of
        OrderedDicts) if it would conflict with special sensitive_config_values.
        This is necessary because bare configs take precedence over the command
        or secret key equivalents, so if the current running config is
        materialized with Airflow defaults they would in turn override user-set
        command or secret key configs.
:param config_sources: The current configuration to operate on
:param display_source: If False, configuration options contain raw
values. If True, options are a tuple of (option_value, source).
Source is either 'airflow.cfg', 'default', 'env var', or 'cmd'.
:param getter_func: A callback function that gets the user configured
override value for a particular sensitive_config_values config.
:return: None, the given config_sources is filtered if necessary,
otherwise untouched.
"""
for section, key in self.sensitive_config_values:
# Don't bother if we don't have section / key
if section not in config_sources or key not in config_sources[section]:
continue
# Check that there is something to override defaults
try:
getter_opt = getter_func(section, key)
except ValueError:
continue
if not getter_opt:
continue
# Check to see that there is a default value
if not self.airflow_defaults.has_option(section, key):
continue
# Check to see if bare setting is the same as defaults
if display_source:
                # when display_source = true, we know that config_sources contains tuples
opt, source = config_sources[section][key] # type: ignore
else:
opt = config_sources[section][key]
if opt == self.airflow_defaults.get(section, key):
del config_sources[section][key]
@staticmethod
def _replace_config_with_display_sources(
config_sources: ConfigSourcesType,
configs: Iterable[tuple[str, ConfigParser]],
display_source: bool,
raw: bool,
deprecated_options: dict[tuple[str, str], tuple[str, str, str]],
include_env: bool,
include_cmds: bool,
include_secret: bool,
):
for source_name, config in configs:
for section in config.sections():
AirflowConfigParser._replace_section_config_with_display_sources(
config,
config_sources,
display_source,
raw,
section,
source_name,
deprecated_options,
configs,
include_env=include_env,
include_cmds=include_cmds,
include_secret=include_secret,
)
@staticmethod
def _deprecated_value_is_set_in_config(
deprecated_section: str,
deprecated_key: str,
configs: Iterable[tuple[str, ConfigParser]],
) -> bool:
for config_type, config in configs:
if config_type == "default":
continue
try:
deprecated_section_array = config.items(section=deprecated_section, raw=True)
for key_candidate, _ in deprecated_section_array:
if key_candidate == deprecated_key:
return True
except NoSectionError:
pass
return False
@staticmethod
def _deprecated_variable_is_set(deprecated_section: str, deprecated_key: str) -> bool:
return (
os.environ.get(f"{ENV_VAR_PREFIX}{deprecated_section.upper()}__{deprecated_key.upper()}")
is not None
)
@staticmethod
def _deprecated_command_is_set_in_config(
deprecated_section: str, deprecated_key: str, configs: Iterable[tuple[str, ConfigParser]]
) -> bool:
return AirflowConfigParser._deprecated_value_is_set_in_config(
deprecated_section=deprecated_section, deprecated_key=deprecated_key + "_cmd", configs=configs
)
@staticmethod
def _deprecated_variable_command_is_set(deprecated_section: str, deprecated_key: str) -> bool:
return (
os.environ.get(f"{ENV_VAR_PREFIX}{deprecated_section.upper()}__{deprecated_key.upper()}_CMD")
is not None
)
@staticmethod
def _deprecated_secret_is_set_in_config(
deprecated_section: str, deprecated_key: str, configs: Iterable[tuple[str, ConfigParser]]
) -> bool:
return AirflowConfigParser._deprecated_value_is_set_in_config(
deprecated_section=deprecated_section, deprecated_key=deprecated_key + "_secret", configs=configs
)
@staticmethod
def _deprecated_variable_secret_is_set(deprecated_section: str, deprecated_key: str) -> bool:
return (
os.environ.get(f"{ENV_VAR_PREFIX}{deprecated_section.upper()}__{deprecated_key.upper()}_SECRET")
is not None
)
@contextmanager
def suppress_future_warnings(self):
suppress_future_warnings = self._suppress_future_warnings
self._suppress_future_warnings = True
yield self
self._suppress_future_warnings = suppress_future_warnings
@staticmethod
def _replace_section_config_with_display_sources(
config: ConfigParser,
config_sources: ConfigSourcesType,
display_source: bool,
raw: bool,
section: str,
source_name: str,
deprecated_options: dict[tuple[str, str], tuple[str, str, str]],
configs: Iterable[tuple[str, ConfigParser]],
include_env: bool,
include_cmds: bool,
include_secret: bool,
):
sect = config_sources.setdefault(section, OrderedDict())
if isinstance(config, AirflowConfigParser):
with config.suppress_future_warnings():
items = config.items(section=section, raw=raw)
else:
items = config.items(section=section, raw=raw)
for k, val in items:
deprecated_section, deprecated_key, _ = deprecated_options.get((section, k), (None, None, None))
if deprecated_section and deprecated_key:
if source_name == "default":
                    # If the deprecated entry has some non-default value set for any of the sources
                    # requested, we should NOT set a default for the new entry (because it would
                    # override anything coming from the deprecated ones)
if AirflowConfigParser._deprecated_value_is_set_in_config(
deprecated_section, deprecated_key, configs
):
continue
if include_env and AirflowConfigParser._deprecated_variable_is_set(
deprecated_section, deprecated_key
):
continue
if include_cmds and (
AirflowConfigParser._deprecated_variable_command_is_set(
deprecated_section, deprecated_key
)
or AirflowConfigParser._deprecated_command_is_set_in_config(
deprecated_section, deprecated_key, configs
)
):
continue
if include_secret and (
AirflowConfigParser._deprecated_variable_secret_is_set(
deprecated_section, deprecated_key
)
or AirflowConfigParser._deprecated_secret_is_set_in_config(
deprecated_section, deprecated_key, configs
)
):
continue
if display_source:
sect[k] = (val, source_name)
else:
sect[k] = val
def load_test_config(self):
"""
Load the unit test configuration.
Note: this is not reversible.
"""
# remove all sections, falling back to defaults
for section in self.sections():
self.remove_section(section)
# then read test config
path = _default_config_file_path("default_test.cfg")
log.info("Reading default test configuration from %s", path)
self.read_string(_parameterized_config_from_template("default_test.cfg"))
# then read any "custom" test settings
log.info("Reading test configuration from %s", TEST_CONFIG_FILE)
self.read(TEST_CONFIG_FILE)
@staticmethod
def _warn_deprecate(
section: str, key: str, deprecated_section: str, deprecated_name: str, extra_stacklevel: int
):
if section == deprecated_section:
warnings.warn(
f"The {deprecated_name} option in [{section}] has been renamed to {key} - "
f"the old setting has been used, but please update your config.",
DeprecationWarning,
stacklevel=4 + extra_stacklevel,
)
else:
warnings.warn(
f"The {deprecated_name} option in [{deprecated_section}] has been moved to the {key} option "
f"in [{section}] - the old setting has been used, but please update your config.",
DeprecationWarning,
stacklevel=4 + extra_stacklevel,
)
def __getstate__(self):
return {
name: getattr(self, name)
for name in [
"_sections",
"is_validated",
"airflow_defaults",
]
}
def __setstate__(self, state):
self.__init__()
config = state.pop("_sections")
self.read_dict(config)
self.__dict__.update(state)
def get_airflow_home() -> str:
"""Get path to Airflow Home."""
return expand_env_var(os.environ.get("AIRFLOW_HOME", "~/airflow"))
def get_airflow_config(airflow_home) -> str:
"""Get Path to airflow.cfg path."""
airflow_config_var = os.environ.get("AIRFLOW_CONFIG")
if airflow_config_var is None:
return os.path.join(airflow_home, "airflow.cfg")
return expand_env_var(airflow_config_var)
def _parameterized_config_from_template(filename) -> str:
TEMPLATE_START = "# ----------------------- TEMPLATE BEGINS HERE -----------------------\n"
path = _default_config_file_path(filename)
with open(path) as fh:
for line in fh:
if line != TEMPLATE_START:
continue
return parameterized_config(fh.read().strip())
raise RuntimeError(f"Template marker not found in {path!r}")
def parameterized_config(template) -> str:
"""
Generates configuration from provided template & variables defined in current scope.
:param template: a config content templated with {{variables}}
"""
all_vars = {k: v for d in [globals(), locals()] for k, v in d.items()}
return template.format(**all_vars)
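# Illustrative example of the templating above (the template line is an assumption, not quoted
# from the bundled config): a line such as
#   sql_alchemy_conn = sqlite:///{AIRFLOW_HOME}/airflow.db
# is formatted with the module-level globals, so with AIRFLOW_HOME=/home/user/airflow it becomes
#   sql_alchemy_conn = sqlite:////home/user/airflow/airflow.db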
def get_airflow_test_config(airflow_home) -> str:
"""Get path to unittests.cfg."""
if "AIRFLOW_TEST_CONFIG" not in os.environ:
return os.path.join(airflow_home, "unittests.cfg")
# It will never return None
return expand_env_var(os.environ["AIRFLOW_TEST_CONFIG"]) # type: ignore[return-value]
def _generate_fernet_key() -> str:
from cryptography.fernet import Fernet
return Fernet.generate_key().decode()
def initialize_config() -> AirflowConfigParser:
"""
Load the Airflow config files.
Called for you automatically as part of the Airflow boot process.
"""
global FERNET_KEY, AIRFLOW_HOME, WEBSERVER_CONFIG
default_config = _parameterized_config_from_template("default_airflow.cfg")
local_conf = AirflowConfigParser(default_config=default_config)
if local_conf.getboolean("core", "unit_test_mode"):
# Load test config only
if not os.path.isfile(TEST_CONFIG_FILE):
from cryptography.fernet import Fernet
log.info("Creating new Airflow config file for unit tests in: %s", TEST_CONFIG_FILE)
pathlib.Path(AIRFLOW_HOME).mkdir(parents=True, exist_ok=True)
FERNET_KEY = Fernet.generate_key().decode()
with open(TEST_CONFIG_FILE, "w") as file:
cfg = _parameterized_config_from_template("default_test.cfg")
file.write(cfg)
make_group_other_inaccessible(TEST_CONFIG_FILE)
local_conf.load_test_config()
else:
# Load normal config
if not os.path.isfile(AIRFLOW_CONFIG):
from cryptography.fernet import Fernet
log.info("Creating new Airflow config file in: %s", AIRFLOW_CONFIG)
pathlib.Path(AIRFLOW_HOME).mkdir(parents=True, exist_ok=True)
FERNET_KEY = Fernet.generate_key().decode()
with open(AIRFLOW_CONFIG, "w") as file:
file.write(default_config)
make_group_other_inaccessible(AIRFLOW_CONFIG)
log.info("Reading the config from %s", AIRFLOW_CONFIG)
local_conf.read(AIRFLOW_CONFIG)
if local_conf.has_option("core", "AIRFLOW_HOME"):
msg = (
"Specifying both AIRFLOW_HOME environment variable and airflow_home "
"in the config file is deprecated. Please use only the AIRFLOW_HOME "
"environment variable and remove the config file entry."
)
if "AIRFLOW_HOME" in os.environ:
warnings.warn(msg, category=DeprecationWarning)
elif local_conf.get("core", "airflow_home") == AIRFLOW_HOME:
warnings.warn(
"Specifying airflow_home in the config file is deprecated. As you "
"have left it at the default value you should remove the setting "
"from your airflow.cfg and suffer no change in behaviour.",
category=DeprecationWarning,
)
else:
                # airflow_home in the config file differs from the default; honour it, but warn
AIRFLOW_HOME = local_conf.get("core", "airflow_home") # type: ignore[assignment]
warnings.warn(msg, category=DeprecationWarning)
# They _might_ have set unit_test_mode in the airflow.cfg, we still
# want to respect that and then load the unittests.cfg
if local_conf.getboolean("core", "unit_test_mode"):
local_conf.load_test_config()
WEBSERVER_CONFIG = local_conf.get("webserver", "config_file")
if not os.path.isfile(WEBSERVER_CONFIG):
import shutil
log.info("Creating new FAB webserver config file in: %s", WEBSERVER_CONFIG)
shutil.copy(_default_config_file_path("default_webserver_config.py"), WEBSERVER_CONFIG)
return local_conf
def make_group_other_inaccessible(file_path: str):
try:
permissions = os.stat(file_path)
os.chmod(file_path, permissions.st_mode & (stat.S_IRUSR | stat.S_IWUSR))
except Exception as e:
log.warning(
"Could not change permissions of config file to be group/other inaccessible. "
"Continuing with original permissions:",
e,
)
# Historical convenience functions to access config entries
def load_test_config():
"""Historical load_test_config."""
warnings.warn(
"Accessing configuration method 'load_test_config' directly from the configuration module is "
"deprecated. Please access the configuration from the 'configuration.conf' object via "
"'conf.load_test_config'",
DeprecationWarning,
stacklevel=2,
)
conf.load_test_config()
def get(*args, **kwargs) -> ConfigType | None:
"""Historical get."""
warnings.warn(
"Accessing configuration method 'get' directly from the configuration module is "
"deprecated. Please access the configuration from the 'configuration.conf' object via "
"'conf.get'",
DeprecationWarning,
stacklevel=2,
)
return conf.get(*args, **kwargs)
def getboolean(*args, **kwargs) -> bool:
"""Historical getboolean."""
warnings.warn(
"Accessing configuration method 'getboolean' directly from the configuration module is "
"deprecated. Please access the configuration from the 'configuration.conf' object via "
"'conf.getboolean'",
DeprecationWarning,
stacklevel=2,
)
return conf.getboolean(*args, **kwargs)
def getfloat(*args, **kwargs) -> float:
"""Historical getfloat."""
warnings.warn(
"Accessing configuration method 'getfloat' directly from the configuration module is "
"deprecated. Please access the configuration from the 'configuration.conf' object via "
"'conf.getfloat'",
DeprecationWarning,
stacklevel=2,
)
return conf.getfloat(*args, **kwargs)
def getint(*args, **kwargs) -> int:
"""Historical getint."""
warnings.warn(
"Accessing configuration method 'getint' directly from the configuration module is "
"deprecated. Please access the configuration from the 'configuration.conf' object via "
"'conf.getint'",
DeprecationWarning,
stacklevel=2,
)
return conf.getint(*args, **kwargs)
def getsection(*args, **kwargs) -> ConfigOptionsDictType | None:
"""Historical getsection."""
warnings.warn(
"Accessing configuration method 'getsection' directly from the configuration module is "
"deprecated. Please access the configuration from the 'configuration.conf' object via "
"'conf.getsection'",
DeprecationWarning,
stacklevel=2,
)
return conf.getsection(*args, **kwargs)
def has_option(*args, **kwargs) -> bool:
"""Historical has_option."""
warnings.warn(
"Accessing configuration method 'has_option' directly from the configuration module is "
"deprecated. Please access the configuration from the 'configuration.conf' object via "
"'conf.has_option'",
DeprecationWarning,
stacklevel=2,
)
return conf.has_option(*args, **kwargs)
def remove_option(*args, **kwargs) -> bool:
"""Historical remove_option."""
warnings.warn(
"Accessing configuration method 'remove_option' directly from the configuration module is "
"deprecated. Please access the configuration from the 'configuration.conf' object via "
"'conf.remove_option'",
DeprecationWarning,
stacklevel=2,
)
return conf.remove_option(*args, **kwargs)
def as_dict(*args, **kwargs) -> ConfigSourcesType:
"""Historical as_dict."""
warnings.warn(
"Accessing configuration method 'as_dict' directly from the configuration module is "
"deprecated. Please access the configuration from the 'configuration.conf' object via "
"'conf.as_dict'",
DeprecationWarning,
stacklevel=2,
)
return conf.as_dict(*args, **kwargs)
def set(*args, **kwargs) -> None:
"""Historical set."""
warnings.warn(
"Accessing configuration method 'set' directly from the configuration module is "
"deprecated. Please access the configuration from the 'configuration.conf' object via "
"'conf.set'",
DeprecationWarning,
stacklevel=2,
)
conf.set(*args, **kwargs)
def ensure_secrets_loaded() -> list[BaseSecretsBackend]:
"""
Ensure that all secrets backends are loaded.
If the secrets_backend_list contains only 2 default backends, reload it.
"""
# Check if the secrets_backend_list contains only 2 default backends
if len(secrets_backend_list) == 2:
return initialize_secrets_backends()
return secrets_backend_list
def get_custom_secret_backend() -> BaseSecretsBackend | None:
"""Get Secret Backend if defined in airflow.cfg."""
secrets_backend_cls = conf.getimport(section="secrets", key="backend")
if not secrets_backend_cls:
return None
try:
backend_kwargs = conf.getjson(section="secrets", key="backend_kwargs")
if not backend_kwargs:
backend_kwargs = {}
elif not isinstance(backend_kwargs, dict):
raise ValueError("not a dict")
except AirflowConfigException:
log.warning("Failed to parse [secrets] backend_kwargs as JSON, defaulting to no kwargs.")
backend_kwargs = {}
except ValueError:
log.warning("Failed to parse [secrets] backend_kwargs into a dict, defaulting to no kwargs.")
backend_kwargs = {}
return secrets_backend_cls(**backend_kwargs)
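# Example airflow.cfg snippet consumed above (a hedged sketch; the backend class and kwargs are
# illustrative):
#   [secrets]
#   backend = airflow.providers.hashicorp.secrets.vault.VaultBackend
#   backend_kwargs = {"url": "http://127.0.0.1:8200", "connections_path": "connections"}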
def initialize_secrets_backends() -> list[BaseSecretsBackend]:
"""
Initialize secrets backend.
* import secrets backend classes
* instantiate them and return them in a list
"""
backend_list = []
custom_secret_backend = get_custom_secret_backend()
if custom_secret_backend is not None:
backend_list.append(custom_secret_backend)
for class_name in DEFAULT_SECRETS_SEARCH_PATH:
secrets_backend_cls = import_string(class_name)
backend_list.append(secrets_backend_cls())
return backend_list
def initialize_auth_manager() -> BaseAuthManager:
"""
Initialize auth manager.
    * import the auth manager class
* instantiate it and return it
"""
auth_manager_cls = conf.getimport(section="core", key="auth_manager")
if not auth_manager_cls:
raise AirflowConfigException(
"No auth manager defined in the config. "
"Please specify one using section/key [core/auth_manager]."
)
return auth_manager_cls()
@functools.lru_cache(maxsize=None)
def _DEFAULT_CONFIG() -> str:
path = _default_config_file_path("default_airflow.cfg")
with open(path) as fh:
return fh.read()
@functools.lru_cache(maxsize=None)
def _TEST_CONFIG() -> str:
path = _default_config_file_path("default_test.cfg")
with open(path) as fh:
return fh.read()
_deprecated = {
"DEFAULT_CONFIG": _DEFAULT_CONFIG,
"TEST_CONFIG": _TEST_CONFIG,
"TEST_CONFIG_FILE_PATH": functools.partial(_default_config_file_path, "default_test.cfg"),
"DEFAULT_CONFIG_FILE_PATH": functools.partial(_default_config_file_path, "default_airflow.cfg"),
}
def __getattr__(name):
if name in _deprecated:
warnings.warn(
f"{__name__}.{name} is deprecated and will be removed in future",
DeprecationWarning,
stacklevel=2,
)
return _deprecated[name]()
raise AttributeError(f"module {__name__} has no attribute {name}")
# Setting AIRFLOW_HOME and AIRFLOW_CONFIG from environment variables, using
# "~/airflow" and "$AIRFLOW_HOME/airflow.cfg" respectively as defaults.
AIRFLOW_HOME = get_airflow_home()
AIRFLOW_CONFIG = get_airflow_config(AIRFLOW_HOME)
# Set up dags folder for unit tests
# this directory won't exist if users install via pip
_TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "tests", "dags"
)
if os.path.exists(_TEST_DAGS_FOLDER):
TEST_DAGS_FOLDER = _TEST_DAGS_FOLDER
else:
TEST_DAGS_FOLDER = os.path.join(AIRFLOW_HOME, "dags")
# Set up plugins folder for unit tests
_TEST_PLUGINS_FOLDER = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "tests", "plugins"
)
if os.path.exists(_TEST_PLUGINS_FOLDER):
TEST_PLUGINS_FOLDER = _TEST_PLUGINS_FOLDER
else:
TEST_PLUGINS_FOLDER = os.path.join(AIRFLOW_HOME, "plugins")
TEST_CONFIG_FILE = get_airflow_test_config(AIRFLOW_HOME)
SECRET_KEY = b64encode(os.urandom(16)).decode("utf-8")
FERNET_KEY = "" # Set only if needed when generating a new file
WEBSERVER_CONFIG = "" # Set by initialize_config
conf = initialize_config()
secrets_backend_list = initialize_secrets_backends()
auth_manager = initialize_auth_manager()
conf.validate()
| 76,033 | 40.525942 | 109 | py |
CLIP2Scene | CLIP2Scene-main/downstream.py | import os
import gc
import argparse
import MinkowskiEngine as ME
import pytorch_lightning as pl
from downstream.evaluate import evaluate
from utils.read_config import generate_config
from downstream.model_builder import make_model
from pytorch_lightning.plugins import DDPPlugin
from downstream.lightning_trainer import LightningDownstream
from downstream.lightning_datamodule import DownstreamDataModule
from downstream.dataloader_kitti import make_data_loader as make_data_loader_kitti
from downstream.dataloader_nuscenes import make_data_loader as make_data_loader_nuscenes
from downstream.dataloader_scannet import make_data_loader as make_data_loader_scannet
def main():
"""
Code for launching the downstream training
"""
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument(
"--cfg_file", type=str, default="config/semseg_nuscenes.yaml", help="specify the config for training"
)
parser.add_argument(
"--resume_path", type=str, default=None, help="provide a path to resume an incomplete training"
)
parser.add_argument(
"--pretraining_path", type=str, default=None, help="provide a path to pre-trained weights"
)
args = parser.parse_args()
config = generate_config(args.cfg_file)
if args.resume_path:
config['resume_path'] = args.resume_path
if args.pretraining_path:
config['pretraining_path'] = args.pretraining_path
if os.environ.get("LOCAL_RANK", 0) == 0:
print(
"\n" + "\n".join(list(map(lambda x: f"{x[0]:20}: {x[1]}", config.items())))
)
dm = DownstreamDataModule(config)
model = make_model(config, config["pretraining_path"])
if config["num_gpus"] > 1:
model = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(model)
module = LightningDownstream(model, config)
path = os.path.join(config["working_dir"], config["datetime"])
trainer = pl.Trainer(
gpus=config["num_gpus"],
accelerator="ddp",
default_root_dir=path,
checkpoint_callback=True,
max_epochs=config["num_epochs"],
plugins=DDPPlugin(find_unused_parameters=True),
num_sanity_val_steps=0,
resume_from_checkpoint=config["resume_path"],
check_val_every_n_epoch=10,
)
print("Starting the training")
trainer.fit(module, dm)
print("Training finished, now evaluating the results")
del trainer
del dm
del module
gc.collect()
if config["dataset"].lower() == "nuscenes":
phase = "verifying" if config['training'] in ("parametrize", "parametrizing") else "val"
val_dataloader = make_data_loader_nuscenes(
config, phase, num_threads=config["num_threads"]
)
elif config["dataset"].lower() == "kitti":
val_dataloader = make_data_loader_kitti(
config, "val", num_threads=config["num_threads"]
)
elif config["dataset"].lower() == "scannet":
val_dataloader = make_data_loader_scannet(
config, "val", num_threads=config["num_threads"]
)
evaluate(model.to(0), val_dataloader, config)
if __name__ == "__main__":
main()
| 3,175 | 36.809524 | 109 | py |
CLIP2Scene | CLIP2Scene-main/pretrain.py | import os
import argparse
import torch.nn as nn
# import MinkowskiEngine as ME
import pytorch_lightning as pl
from utils.read_config import generate_config
from pretrain.model_builder import make_model
from pytorch_lightning.plugins import DDPPlugin
from pretrain.lightning_trainer import LightningPretrain
from pretrain.lightning_datamodule import PretrainDataModule
from pretrain.lightning_trainer_spconv import LightningPretrainSpconv
def main():
"""
Code for launching the pretraining
"""
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument(
"--cfg_file", type=str, default="config/slidr_minkunet.yaml", help="specify the config for training"
)
parser.add_argument(
"--resume_path", type=str, default=None, help="provide a path to resume an incomplete training"
)
args = parser.parse_args()
config = generate_config(args.cfg_file)
if args.resume_path:
config['resume_path'] = args.resume_path
if os.environ.get("LOCAL_RANK", 0) == 0:
print(
"\n" + "\n".join(list(map(lambda x: f"{x[0]:20}: {x[1]}", config.items())))
)
dm = PretrainDataModule(config)
model_points, model_images, model_fusion = make_model(config)
if config["num_gpus"] > 1:
# model_points = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(model_points)
model_images = nn.SyncBatchNorm.convert_sync_batchnorm(model_images)
model_points = model_points #nn.SyncBatchNorm.convert_sync_batchnorm(model_points)
model_fusion = nn.SyncBatchNorm.convert_sync_batchnorm(model_fusion)
if config["model_points"] == "minkunet":
module = LightningPretrain(model_points, model_images, model_fusion, config)
elif config["model_points"] == "voxelnet":
module = LightningPretrainSpconv(model_points, model_images, config)
path = os.path.join(config["working_dir"], config["datetime"])
trainer = pl.Trainer(
gpus=config["num_gpus"],
accelerator="ddp",
default_root_dir=path,
checkpoint_callback=True,
max_epochs=config["num_epochs"],
plugins=DDPPlugin(find_unused_parameters=True),
num_sanity_val_steps=0,
resume_from_checkpoint=config["resume_path"],
check_val_every_n_epoch=10,
)
print("Starting the training")
trainer.fit(module, dm)
if __name__ == "__main__":
main()
| 2,421 | 36.261538 | 108 | py |
CLIP2Scene | CLIP2Scene-main/evaluate.py | import torch
import argparse
from downstream.evaluate import evaluate
from utils.read_config import generate_config
from downstream.model_builder import make_model
from downstream.dataloader_kitti import make_data_loader as make_data_loader_kitti
from downstream.dataloader_nuscenes import make_data_loader as make_data_loader_nuscenes
def main():
"""
Code for launching the downstream evaluation
"""
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument(
"--cfg_file", type=str, default=None, help="specify the config for training"
)
parser.add_argument(
"--resume_path", type=str, default=None, help="provide a path to resume an incomplete training"
)
parser.add_argument(
"--dataset", type=str, default=None, help="Choose between nuScenes and KITTI"
)
args = parser.parse_args()
if args.cfg_file is None and args.dataset is not None:
if args.dataset.lower() == "kitti":
args.cfg_file = "config/semseg_kitti.yaml"
elif args.dataset.lower() == "nuscenes":
args.cfg_file = "config/semseg_nuscenes.yaml"
else:
raise Exception(f"Dataset not recognized: {args.dataset}")
elif args.cfg_file is None:
args.cfg_file = "config/semseg_nuscenes.yaml"
config = generate_config(args.cfg_file)
if args.resume_path:
config['resume_path'] = args.resume_path
print("\n" + "\n".join(list(map(lambda x: f"{x[0]:20}: {x[1]}", config.items()))))
print("Creating the loaders")
if config["dataset"].lower() == "nuscenes":
phase = "verifying" if config['training'] in ("parametrize", "parametrizing") else "val"
val_dataloader = make_data_loader_nuscenes(
config, phase, num_threads=config["num_threads"]
)
elif config["dataset"].lower() == "kitti":
val_dataloader = make_data_loader_kitti(
config, "val", num_threads=config["num_threads"]
)
else:
raise Exception(f"Dataset not recognized: {args.dataset}")
print("Creating the model")
model = make_model(config, config["pretraining_path"]).to(0)
checkpoint = torch.load(config["resume_path"], map_location=torch.device(0))
if "config" in checkpoint:
for cfg in ("voxel_size", "cylindrical_coordinates"):
assert checkpoint["config"][cfg] == config[cfg], (
f"{cfg} is not consistant.\n"
f"Checkpoint: {checkpoint['config'][cfg]}\n"
f"Config: {config[cfg]}."
)
try:
model.load_state_dict(checkpoint["model_points"])
except KeyError:
weights = {
k.replace("model.", ""): v
for k, v in checkpoint["state_dict"].items()
if k.startswith("model.")
}
model.load_state_dict(weights)
evaluate(model, val_dataloader, config)
if __name__ == "__main__":
main()
| 2,938 | 37.671053 | 103 | py |
CLIP2Scene | CLIP2Scene-main/pretrain/dataloader_scannet.py | import os
import copy
import torch
import numpy as np
from PIL import Image
import MinkowskiEngine as ME
from torch.utils.data import Dataset
# import pc_utils
from plyfile import PlyData, PlyElement
import math
# from pc_utils import write_ply_rgb
import sys
sys.path.append("..")
# from MinkowskiEngine.utils import sparse_quantize
import imageio
import cv2
import random
def write_ply_rgb(points, colors, filename, text=True):
""" input: Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i, 0], points[i, 1], points[i, 2]) for i in range(points.shape[0])]
colors = [(colors[i, 0], colors[i, 1], colors[i, 2]) for i in range(colors.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def scannet_collate_pair_fn(batch):
(
coords,
feats,
labels,
imgs,
pairing_points,
pairing_images,
inverse_indexes,
scan_names,
) = list(zip(*batch))
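    # Shift the per-sample pairing indices so that, after the point clouds and images are
    # concatenated below, pairing_points still indexes into the batched point tensor and
    # pairing_images[:, 0] still indexes into the batched image tensor.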
offset_point = 0
offset_image = 0
for batch_id in range(len(coords)):
pairing_points[batch_id][:] += offset_point
offset_point += coords[batch_id].shape[0]
pairing_images[batch_id][:, 0] += offset_image
offset_image += imgs[batch_id].shape[0]
coords = ME.utils.batched_coordinates(coords, dtype=torch.float32)
feats = torch.cat(feats, dim=0)
imgs = torch.cat(imgs, dim=0)
pairing_points = torch.cat(pairing_points, dim=0)
pairing_images = torch.cat(pairing_images, dim=0)
return {
"sinput_C": coords,
"sinput_F": feats,
"input_I": imgs,
"pairing_points": pairing_points,
"pairing_images": pairing_images,
"inverse_indexes": inverse_indexes,
}
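# Shapes of the batch built above (a sketch; Npts/Nimg are totals after concatenation and M is the
# number of point/pixel matches):
#   sinput_C: (Npts, 4) batched coordinates, sinput_F: (Npts, C) features, input_I: (Nimg, 3, H, W),
#   pairing_points: (M,) indices into the point tensor, pairing_images: (M, 3) = (image idx, row, col).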
class scannet_Dataset(Dataset):
def __init__(self, phase, config, shuffle = True, cloud_transforms = None, mixed_transforms = None):
self.scannet_root_dir = config['dataRoot_scannet']
if phase == 'train':
self.scannet_file_list = self.read_files(config['train_file'])
else:
self.scannet_file_list = self.read_files(config['val_file'])
self.mixed_transforms = mixed_transforms
self.voxel_size = config['voxel_size']
self.phase = phase
self.config = config
self.imageDim = (640, 480)
# self.imageDim = (224, 416)
self.cloud_transforms = cloud_transforms
self.maxImages = 8
def read_files(self, file):
f = open(file)
lines = f.readlines()
name_list = [line.split('.')[0] for line in lines]
f.close()
return name_list
def __len__(self):
return len(self.scannet_file_list)
def read_pose_file(self, fname):
posemat = np.asarray([[float(x[0]), float(x[1]), float(x[2]), float(x[3])] for x in
(x.split(" ") for x in open(fname).read().splitlines())])
return posemat
def read_intrinsic_file(self, fname):
intrinsic = np.asarray([[float(x[0]), float(x[1]), float(x[2]), float(x[3])] for x in
(x.split(" ") for x in open(fname).read().splitlines())])
return intrinsic
def read_txt(self, path):
# Read txt file into lines.
with open(path) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
return lines
def computeLinking(self, camera_to_world, coords, depth, link_proj_threshold, intrinsic_color, intrinsic_depth, imageDim):
"""
:param camera_to_world: 4 x 4
:param coords: N x 3 format
:param depth: H x W format
        :param intrinsic_depth: 4 x 4
        :param intrinsic_color: 4 x 4, not used currently
:return: linking, N x 3 format, (H,W,mask)
"""
# print("imageDim ", imageDim)
intrinsic = intrinsic_depth
link = np.zeros((3, coords.shape[0]), dtype=float)
coordsNew = np.concatenate([coords, np.ones([coords.shape[0], 1])], axis=1).T #4 x N
assert coordsNew.shape[0] == 4, "[!] Shape error"
world_to_camera = np.linalg.inv(camera_to_world) # 4 x 4
p = np.matmul(world_to_camera, coordsNew) # 4 x N
p[0] = (p[0] * intrinsic[0][0]) / p[2] + intrinsic[0][2]
p[1] = (p[1] * intrinsic[1][1]) / p[2] + intrinsic[1][2]
pi = p
inside_mask = (pi[0] >= 0) * (pi[1] >= 0) * (pi[0] <= imageDim[1] - 1) * (pi[1] <= imageDim[0]-1)
        occlusion_mask = np.abs(depth[np.round(pi[1][inside_mask]).astype(int), np.round(pi[0][inside_mask]).astype(int)] - p[2][inside_mask]) <= link_proj_threshold
inside_mask[inside_mask == True] = occlusion_mask
link[0][inside_mask] = pi[1][inside_mask]
link[1][inside_mask] = pi[0][inside_mask]
link[2][inside_mask] = 1
return link.T
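    # The linking above is a standard pinhole projection (sketch): p_cam = world_to_camera @ p_world,
    # u = fx * x / z + cx, v = fy * y / z + cy; a point is kept only if (u, v) falls inside the depth
    # image and the rendered depth agrees with the point's camera-space z within link_proj_threshold
    # metres, which acts as a simple occlusion test.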
def __getitem__(self, idx):
path = os.path.join(self.scannet_root_dir, self.scannet_file_list[idx], self.scannet_file_list[idx]+"_new_semantic.npy")
data = torch.from_numpy(np.load(path))
coords, feats, labels = data[:, :3], data[:, 3: 6], data[:, 9:]
sceneName = self.scannet_file_list[idx]
feats = feats / 127.5 - 1
frame_names = []
imgs = []
links = []
intrinsic_color = self.read_intrinsic_file(os.path.join(self.config['dataRoot_images'], sceneName, 'intrinsics_color.txt'))
intrinsic_depth = self.read_intrinsic_file(os.path.join(self.config['dataRoot_images'], sceneName, 'intrinsics_depth.txt'))
for framename in os.listdir(os.path.join(self.config['dataRoot_images'], sceneName, 'color')):
frame_names.append(framename.split('.')[0])
pairing_points = []
pairing_images = []
frame_names = random.sample(frame_names, min(self.maxImages, len(frame_names)))
for i, frameid in enumerate(frame_names):
f = os.path.join(self.config['dataRoot_images'], sceneName, 'color', frameid + '.jpg')
img = imageio.imread(f) / 255
img = cv2.resize(img, self.imageDim)
depth = imageio.imread(f.replace('color', 'depth').replace('.jpg', '.png')) / 1000.0 # convert to meter
posePath = f.replace('color', 'pose').replace('.jpg', '.txt')
pose = self.read_pose_file(posePath)
link = self.computeLinking(pose, coords, depth, 0.05, intrinsic_color, intrinsic_depth, depth.shape)
pairing_point = torch.from_numpy(np.argwhere(link[:, 2] == 1)).squeeze()
pairing_points.append(pairing_point)
link = torch.from_numpy(link).int()
imgs.append(torch.from_numpy(img.transpose((2, 0, 1))))
pairing_image = link[pairing_point, :2]
pairing_images.append(torch.cat((torch.ones(pairing_point.shape[0], 1) * i,
pairing_image), dim=1))
imgs = torch.stack(imgs)
pairing_points = torch.cat(pairing_points, dim=0).numpy()
pairing_images = torch.cat(pairing_images, dim=0).numpy()
if self.cloud_transforms:
coords = self.cloud_transforms(coords.float())
if self.mixed_transforms:
(
coords_b,
feats_b,
imgs_b,
pairing_points_b,
pairing_images_b,
) = self.mixed_transforms(
coords, feats, imgs, pairing_points, pairing_images
)
coords, feats, imgs, pairing_points, pairing_images = coords_b, feats_b, imgs_b, torch.from_numpy(pairing_points_b),\
torch.from_numpy(pairing_images_b)
coords = (coords - coords.mean(0)) / self.voxel_size
discrete_coords, indexes, inverse_indexes = ME.utils.sparse_quantize(
coords.contiguous(), return_index=True, return_inverse=True
)
# indexes here are the indexes of points kept after the voxelization
pairing_points = inverse_indexes[pairing_points]
feats = feats[indexes]
assert pairing_points.shape[0] == pairing_images.shape[0]
packages = (discrete_coords, feats, labels, imgs, pairing_points, pairing_images, inverse_indexes, self.scannet_file_list[idx])
return packages
| 8,764 | 35.67364 | 171 | py |
CLIP2Scene | CLIP2Scene-main/pretrain/lightning_datamodule.py | import torch
import numpy as np
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from pretrain.dataloader_nuscenes import (
NuScenesMatchDataset,
minkunet_collate_pair_fn,
)
from pretrain.dataloader_kitti import (
KittiMatchDataset,
kitti_collate_pair_fn,
)
from pretrain.dataloader_scannet import (
scannet_Dataset,
scannet_collate_pair_fn,
)
# try:
# from pretrain.dataloader_scannet import (
# scannet_Dataset,
# scannet_collate_pair_fn,
# )
# except ImportError:
# scannet_Dataset = None
# scannet_collate_pair_fn = None
try:
from pretrain.dataloader_nuscenes_spconv import NuScenesMatchDatasetSpconv, spconv_collate_pair_fn
except ImportError:
NuScenesMatchDatasetSpconv = None
spconv_collate_pair_fn = None
from utils.transforms import (
make_transforms_clouds,
make_transforms_asymmetrical,
make_transforms_asymmetrical_val,
)
class PretrainDataModule(pl.LightningDataModule):
def __init__(self, config):
super().__init__()
self.config = config
if config["num_gpus"]:
self.batch_size = config["batch_size"] // config["num_gpus"]
else:
self.batch_size = config["batch_size"]
def setup(self, stage):
cloud_transforms_train = make_transforms_clouds(self.config)
mixed_transforms_train = make_transforms_asymmetrical(self.config)
cloud_transforms_val = None
mixed_transforms_val = make_transforms_asymmetrical_val(self.config)
if self.config["dataset"].lower() == "nuscenes" and self.config["model_points"] == "minkunet":
Dataset = NuScenesMatchDataset
elif self.config["dataset"].lower() == "kitti":
Dataset = KittiMatchDataset
elif self.config["dataset"].lower() == "scannet":
Dataset = scannet_Dataset
elif self.config["dataset"].lower() == "nuscenes" and self.config["model_points"] == "voxelnet":
Dataset = NuScenesMatchDatasetSpconv
else:
raise Exception("Dataset Unknown")
# print(self.config["dataset"].lower())
# print(type(Dataset))
if self.config["training"] in ("parametrize", "parametrizing"):
phase_train = "parametrizing"
phase_val = "verifying"
else:
phase_train = "train"
phase_val = "val"
self.train_dataset = Dataset(
phase=phase_train,
config=self.config,
shuffle=True,
cloud_transforms=cloud_transforms_train,
mixed_transforms=mixed_transforms_train,
)
print("Dataset Loaded")
print("training size: ", len(self.train_dataset))
if self.config["dataset"].lower() == "nuscenes":
self.val_dataset = Dataset(
phase=phase_val,
shuffle=False,
cloud_transforms=cloud_transforms_val,
mixed_transforms=mixed_transforms_val,
config=self.config,
cached_nuscenes=self.train_dataset.nusc,
# cached_nuscenes=None,
)
else:
self.val_dataset = Dataset(
phase=phase_val,
shuffle=False,
cloud_transforms=cloud_transforms_val,
mixed_transforms=mixed_transforms_val,
config=self.config,
# cached_nuscenes=self.train_dataset.nusc,
# cached_nuscenes=None,
)
print("validation size: ", len(self.val_dataset))
def train_dataloader(self):
if self.config["num_gpus"]:
num_workers = self.config["num_threads"] // self.config["num_gpus"]
else:
num_workers = self.config["num_threads"]
if self.config["dataset"].lower() == "nuscenes":
default_collate_pair_fn = minkunet_collate_pair_fn
elif self.config["dataset"].lower() == "kitti":
default_collate_pair_fn = kitti_collate_pair_fn
elif self.config["dataset"].lower() == "scannet":
default_collate_pair_fn = scannet_collate_pair_fn
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=num_workers,
collate_fn=default_collate_pair_fn,
pin_memory=True,
drop_last=True,
worker_init_fn=lambda id: np.random.seed(
torch.initial_seed() // 2 ** 32 + id
),
)
def val_dataloader(self):
if self.config["num_gpus"]:
num_workers = self.config["num_threads"] // self.config["num_gpus"]
else:
num_workers = self.config["num_threads"]
if self.config["dataset"].lower() == "nuscenes":
default_collate_pair_fn = minkunet_collate_pair_fn
elif self.config["dataset"].lower() == "kitti":
default_collate_pair_fn = kitti_collate_pair_fn
elif self.config["dataset"].lower() == "scannet":
default_collate_pair_fn = scannet_collate_pair_fn
return DataLoader(
self.val_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=num_workers,
collate_fn=default_collate_pair_fn,
pin_memory=True,
drop_last=False,
worker_init_fn=lambda id: np.random.seed(
torch.initial_seed() // 2 ** 32 + id
),
)
| 5,540 | 33.203704 | 104 | py |
CLIP2Scene | CLIP2Scene-main/pretrain/lightning_trainer.py | import os
import re
import torch
import numpy as np
import torch.optim as optim
import MinkowskiEngine as ME
import pytorch_lightning as pl
from utils.chamfer_distance import ComputeCDLoss
from pretrain.criterion import NCELoss, DistillKL, semantic_NCELoss
from pytorch_lightning.utilities import rank_zero_only
from torchsparse import SparseTensor as spvcnn_SparseTensor
from torch import nn
import torch.nn.functional as F
import random
import numba as nb
@nb.jit()
def nb_pack(counts):
return [np.array(list(range(i))) for i in counts]
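# Illustrative sketch (never called): nb_pack turns per-voxel counts into the rank of
# each element inside its own group, which feature_packaging later uses as an offset.
# The toy counts below are assumptions.
def _nb_pack_sketch():
    counts = np.array([2, 3])
    offsets = np.concatenate(nb_pack(counts), axis=0)
    return offsets # array([0, 1, 0, 1, 2])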
class LightningPretrain(pl.LightningModule):
def __init__(self, model_points, model_images, model_fusion, config):
super().__init__()
self.model_points = model_points
self.model_images = model_images
self.model_fusion = model_fusion
self._config = config
self.losses = config["losses"]
self.train_losses = []
self.val_losses = []
self.num_matches = config["num_matches"]
self.batch_size = config["batch_size"]
self.num_epochs = config["num_epochs"]
self.superpixel_size = config["superpixel_size"]
self.epoch = 0
self.cot = 0
self.CE = nn.CrossEntropyLoss()
self.CD_loss = ComputeCDLoss()
self.KLloss = DistillKL(T=1)
if config["resume_path"] is not None:
self.epoch = int(
re.search(r"(?<=epoch=)[0-9]+", config["resume_path"])[0]
)
self.criterion = NCELoss(temperature=config["NCE_temperature"])
self.sem_NCE = semantic_NCELoss(temperature=config["NCE_temperature"])
self.working_dir = os.path.join(config["working_dir"], config["datetime"])
if os.environ.get("LOCAL_RANK", 0) == 0:
os.makedirs(self.working_dir, exist_ok=True)
self.text_embeddings_path = config['text_embeddings_path']
text_categories = config['text_categories']
if self.text_embeddings_path is None:
self.text_embeddings = nn.Parameter(torch.zeros(text_categories, 512))
nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)
else:
self.register_buffer('text_embeddings', torch.randn(text_categories, 512))
loaded = torch.load(self.text_embeddings_path, map_location='cuda')
self.text_embeddings[:, :] = loaded[:, :]
self.saved = False
self.max_size = 8
def get_in_field(self, coords, feats):
in_field = ME.TensorField(coordinates=coords.float(), features=feats.int(),
# coordinate_map_key=A.coordiante_map_key, coordinate_manager=A.coordinate_manager,
quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
minkowski_algorithm=ME.MinkowskiAlgorithm.SPEED_OPTIMIZED,
# minkowski_algorithm=ME.MinkowskiAlgorithm.MEMORY_EFFICIENT,
# device=self.config.device,
).float()
return in_field
def configure_optimizers(self):
optimizer = optim.SGD(
list(self.model_points.parameters()) + list(self.model_images.parameters()) + list(self.model_fusion.parameters()),
lr=self._config["lr"],
momentum=self._config["sgd_momentum"],
dampening=self._config["sgd_dampening"],
weight_decay=self._config["weight_decay"],
)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, self.num_epochs)
return [optimizer], [scheduler]
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
optimizer.zero_grad(set_to_none=True)
def training_step(self, batch, batch_idx):
self.model_points.train()
sinput_C = batch["sinput_C"]
sinput_F = batch["sinput_F"]
if self._config['dataset'] == "nuscenes":
sweepIds = batch["sweepIds"]
if self._config['max_sweeps'] > 1:
for sweepid in range(1, self._config['max_sweeps']):
sweepInd = sweepIds == sweepid
sinput_C[sweepInd, -1] = sinput_C[sweepInd, -1] + self._config['batch_size'] * sweepid
if self._config['dataset'] == "scannet":
sparse_input = ME.SparseTensor(sinput_F.float(), coordinates=sinput_C.int())
else:
sparse_input = spvcnn_SparseTensor(sinput_F, sinput_C)
output_points = self.model_points(sparse_input)
output_images = self.model_images(batch["input_I"].float())
del batch["sinput_F"]
del batch["sinput_C"]
del batch["input_I"]
del sparse_input
        # each loss is applied independently on each GPU
losses = [
getattr(self, loss)(batch, output_points, output_images)
for loss in self.losses
]
loss = torch.mean(torch.stack(losses))
torch.cuda.empty_cache()
self.log(
"train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, batch_size=self.batch_size
)
if not self.saved:
if self.epoch == 10:
self.save()
self.saved = True
self.train_losses.append(loss.detach().cpu())
return loss
def scannet_loss(self, batch, output_points, output_images):
# output_images.shape: torch.Size([96, 64, 224, 416])
# output_points.shape: torch.Size([225648, 64])
# pairing_points.shape: torch.Size([214155])
# pairing_images.shape: torch.Size([214155, 3])
pairing_points = batch["pairing_points"]
pairing_images = batch["pairing_images"]
image_feats, image_pred = output_images
point_feats_a, point_feats_b = output_points
# global
point_logists = F.conv1d(point_feats_a.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
k_logists = point_logists[pairing_points]
m_pred = tuple(pairing_images.T.long())
q_pred = image_pred[m_pred]
# switchable training strategy
if self.epoch >= 10:
rd = random.randint(1, 10)
if rd > 5: q_pred = k_logists.argmax(dim=1)
loss_semantic = self.CE(k_logists, q_pred)
point_feats_b = point_feats_b[pairing_points]
image_feats = image_feats.permute(0, 2, 3, 1)[m_pred]
loss_spatial = torch.mean(1 - F.cosine_similarity(image_feats, point_feats_b, dim=1))
return loss_semantic + loss_spatial
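    # Illustrative sketch only (never called): the F.conv1d call in scannet_loss with the
    # text embeddings as 1x1 kernels is just a matmul against the text-embedding matrix.
    # The toy batch size below is an assumption.
    def _text_logits_sketch(self):
        feats = torch.randn(4, self.text_embeddings.shape[1],
                            device=self.text_embeddings.device,
                            dtype=self.text_embeddings.dtype)
        logits_conv = F.conv1d(feats.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
        logits_mm = feats @ self.text_embeddings.t()
        assert torch.allclose(logits_conv, logits_mm, atol=1e-3)
        return logits_mm # (4, text_categories) per-point class logits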
def feature_packaging(self, image_global_allpoints, point_global_allpoints, inverse_indexes_merged, image_pred):
uni_feature = torch.cat((image_global_allpoints, point_global_allpoints, image_pred.unsqueeze(-1)), dim=1)
max_inverse_indexes = inverse_indexes_merged.max()
feature_packages = torch.zeros((max_inverse_indexes + 1) * self.max_size, uni_feature.shape[1]).cuda()
sorted_inverse_indexes, sorted_indices = torch.sort(inverse_indexes_merged)
uni_feature = uni_feature[sorted_indices]
_, counts = torch.unique(sorted_inverse_indexes, return_counts=True)
offset = nb_pack(counts.detach().cpu().numpy())
offset = torch.from_numpy(np.concatenate(offset, axis=0)).cuda()
valid_index = offset < self.max_size
offset = offset[valid_index]
sorted_inverse_indexes = sorted_inverse_indexes[valid_index]
uni_feature = uni_feature[valid_index]
index = sorted_inverse_indexes * self.max_size + offset
feature_packages[index] = uni_feature
feature_packages = feature_packages.view((max_inverse_indexes + 1), self.max_size, uni_feature.shape[1])
return feature_packages
def loss_nuscenes(self, batch, output_points, output_images):
# output_images.shape: torch.Size([96, 64, 224, 416])
# output_points.shape: torch.Size([225648, 64])
# pairing_points.shape: torch.Size([214155])
# pairing_images.shape: torch.Size([214155, 3])
pairing_points = batch["pairing_points"]
pairing_images = batch["pairing_images"]
inverse_indexes_group = batch["inverse_indexes_group"]
inverse_indexes_merged = batch['inverse_indexes_merged']
image_global, image_pred = output_images
point_local, point_global = output_points
point_local = point_local[inverse_indexes_group]
point_local_allpoints = point_local[pairing_points]
point_global = point_global[inverse_indexes_group]
point_global_allpoints = point_global[pairing_points]
inverse_indexes_merged = inverse_indexes_merged[pairing_points]
m_pred = tuple(pairing_images.T.long())
image_global_allpoints = image_global.permute(0, 2, 3, 1)[m_pred]
image_pred = image_pred[m_pred]
feature_packages = self.feature_packaging(image_global_allpoints, point_local_allpoints, inverse_indexes_merged, image_pred)
super_nodes_points, inner_products, pixel_pred = self.model_fusion(feature_packages)
super_nodes_logit = F.conv1d(point_global_allpoints.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
loss_semantic = 0
# Switchable Self-training Strategy
if self.epoch > 10:
index_set = set(np.array(list(range(inverse_indexes_group.shape[0]))))
pairing_set = set(pairing_points.detach().long().cpu().numpy())
index_set_rest = list(index_set - pairing_set)
point_global_rest = point_global[index_set_rest]
point_global_logits = F.conv1d(point_global_rest.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
point_global_pred = point_global_logits.argmax(dim=1)
loss_semantic += self.CE(point_global_logits, point_global_pred)
rd = random.randint(1, 10)
if rd > 5: image_pred = super_nodes_logit.argmax(dim=1)
        loss_semantic += self.CE(super_nodes_logit, image_pred)
loss_spatial_temporal = torch.mean(1 - inner_products)
return loss_semantic + loss_spatial_temporal
def loss(self, batch, output_points, output_images):
pairing_points = batch["pairing_points"]
pairing_images = batch["pairing_images"]
idx = np.random.choice(pairing_points.shape[0], self.num_matches, replace=False)
k = output_points[pairing_points[idx]]
m = tuple(pairing_images[idx].T.long())
q = output_images.permute(0, 2, 3, 1)[m]
return self.criterion(k, q)
def loss_superpixels_average(self, batch, output_points, output_images):
# compute a superpoints to superpixels loss using superpixels
torch.cuda.empty_cache() # This method is extremely memory intensive
superpixels = batch["superpixels"]
pairing_images = batch["pairing_images"]
pairing_points = batch["pairing_points"]
superpixels = (
torch.arange(
0,
output_images.shape[0] * self.superpixel_size,
self.superpixel_size,
device=self.device,
)[:, None, None] + superpixels
)
m = tuple(pairing_images.cpu().T.long())
superpixels_I = superpixels.flatten()
idx_P = torch.arange(pairing_points.shape[0], device=superpixels.device)
total_pixels = superpixels_I.shape[0]
idx_I = torch.arange(total_pixels, device=superpixels.device)
with torch.no_grad():
one_hot_P = torch.sparse_coo_tensor(
torch.stack((
superpixels[m], idx_P
), dim=0),
torch.ones(pairing_points.shape[0], device=superpixels.device),
(superpixels.shape[0] * self.superpixel_size, pairing_points.shape[0])
)
one_hot_I = torch.sparse_coo_tensor(
torch.stack((
superpixels_I, idx_I
), dim=0),
torch.ones(total_pixels, device=superpixels.device),
(superpixels.shape[0] * self.superpixel_size, total_pixels)
)
k = one_hot_P @ output_points[pairing_points]
k = k / (torch.sparse.sum(one_hot_P, 1).to_dense()[:, None] + 1e-6)
q = one_hot_I @ output_images.permute(0, 2, 3, 1).flatten(0, 2)
q = q / (torch.sparse.sum(one_hot_I, 1).to_dense()[:, None] + 1e-6)
mask = torch.where(k[:, 0] != 0)
k = k[mask]
q = q[mask]
return self.criterion(k, q)
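    # Illustrative sketch only (never called): the sparse one-hot trick used in
    # loss_superpixels_average, i.e. averaging features per group with a sparse matmul.
    # Group assignments and sizes below are assumptions.
    @staticmethod
    def _superpixel_pooling_sketch():
        groups = torch.tensor([0, 0, 1, 2, 2, 2]) # superpixel id of each of 6 points
        feats = torch.randn(6, 8)
        one_hot = torch.sparse_coo_tensor(
            torch.stack((groups, torch.arange(6))),
            torch.ones(6),
            (3, 6),
        )
        pooled = one_hot @ feats
        pooled = pooled / (torch.sparse.sum(one_hot, 1).to_dense()[:, None] + 1e-6)
        return pooled # (3, 8) mean feature of each superpixel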
def training_epoch_end(self, outputs):
self.epoch += 1
if self.epoch == self.num_epochs:
self.save()
return super().training_epoch_end(outputs)
def validation_step(self, batch, batch_idx):
sinput_C = batch["sinput_C"]
sinput_F = batch["sinput_F"]
if self._config['dataset'] == "scannet":
sparse_input = ME.SparseTensor(sinput_F.float(), coordinates=sinput_C.int())
else:
sparse_input = spvcnn_SparseTensor(sinput_F, sinput_C)
output_points = self.model_points(sparse_input)
self.model_images.eval()
output_images = self.model_images(batch["input_I"])
losses = [
getattr(self, loss)(batch, output_points, output_images)
for loss in self.losses
]
loss = torch.mean(torch.stack(losses))
self.val_losses.append(loss.detach().cpu())
self.log(
"val_loss", loss, on_epoch=True, prog_bar=True, logger=True, sync_dist=True, batch_size=self.batch_size
)
return loss
@rank_zero_only
def save(self):
path = os.path.join(self.working_dir, "model.pt")
torch.save(
{
"model_points": self.model_points.state_dict(),
"model_images": self.model_images.state_dict(),
"model_fusion": self.model_fusion.state_dict(),
"epoch": self.epoch,
"config": self._config,
},
path,
)
| 14,055 | 39.507205 | 132 | py |
CLIP2Scene | CLIP2Scene-main/pretrain/dataloader_nuscenes.py | import os
import copy
import torch
import numpy as np
from PIL import Image
# import MinkowskiEngine as ME
from pyquaternion import Quaternion
from torch.utils.data import Dataset
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import view_points
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import LidarPointCloud
from torchsparse.utils.quantize import sparse_quantize
from abc import ABC, abstractmethod
import json
import cv2
import pickle
CUSTOM_SPLIT = [
"scene-0008", "scene-0009", "scene-0019", "scene-0029", "scene-0032", "scene-0042",
"scene-0045", "scene-0049", "scene-0052", "scene-0054", "scene-0056", "scene-0066",
"scene-0067", "scene-0073", "scene-0131", "scene-0152", "scene-0166", "scene-0168",
"scene-0183", "scene-0190", "scene-0194", "scene-0208", "scene-0210", "scene-0211",
"scene-0241", "scene-0243", "scene-0248", "scene-0259", "scene-0260", "scene-0261",
"scene-0287", "scene-0292", "scene-0297", "scene-0305", "scene-0306", "scene-0350",
"scene-0352", "scene-0358", "scene-0361", "scene-0365", "scene-0368", "scene-0377",
"scene-0388", "scene-0391", "scene-0395", "scene-0413", "scene-0427", "scene-0428",
"scene-0438", "scene-0444", "scene-0452", "scene-0453", "scene-0459", "scene-0463",
"scene-0464", "scene-0475", "scene-0513", "scene-0533", "scene-0544", "scene-0575",
"scene-0587", "scene-0589", "scene-0642", "scene-0652", "scene-0658", "scene-0669",
"scene-0678", "scene-0687", "scene-0701", "scene-0703", "scene-0706", "scene-0710",
"scene-0715", "scene-0726", "scene-0735", "scene-0740", "scene-0758", "scene-0786",
"scene-0790", "scene-0804", "scene-0806", "scene-0847", "scene-0856", "scene-0868",
"scene-0882", "scene-0897", "scene-0899", "scene-0976", "scene-0996", "scene-1012",
"scene-1015", "scene-1016", "scene-1018", "scene-1020", "scene-1024", "scene-1044",
"scene-1058", "scene-1094", "scene-1098", "scene-1107",
]
def minkunet_collate_pair_fn(list_data):
"""
Collate function adapted for creating batches with MinkowskiEngine.
"""
(
coords,
feats,
images,
pairing_points,
pairing_images,
inverse_indexes,
inverse_indexes_merged,
sweepIds_group,
sweep_pairing_group,
) = list(zip(*list_data))
batch_n_points, batch_n_pairings = [], []
offset = 0
offset_inverse_indexes = 0
for batch_id in range(len(coords)):
# Move batchids to the beginning
coords[batch_id][:, -1] = batch_id
pairing_points[batch_id][:] += offset_inverse_indexes
pairing_images[batch_id][:, 0] += batch_id * images[0].shape[0]
inverse_indexes[batch_id][:] += offset
inverse_indexes_merged[batch_id][:] += offset
batch_n_points.append(coords[batch_id].shape[0])
batch_n_pairings.append(pairing_points[batch_id].shape[0])
offset += coords[batch_id].shape[0]
offset_inverse_indexes += inverse_indexes[batch_id].shape[0]
coords_batch = torch.cat(coords, 0).int()
pairing_points = torch.cat(pairing_points, 0)
pairing_images = torch.cat(pairing_images, 0)
feats_batch = torch.cat(feats, 0).float()
images_batch = torch.cat(images, 0).float()
sweepIds_group = torch.cat(sweepIds_group, 0)
inverse_indexes_merged = torch.cat(inverse_indexes_merged, 0)
inverse_indexes_group = torch.cat(inverse_indexes, 0)
return {
"sinput_C": coords_batch,
"sinput_F": feats_batch,
"input_I": images_batch,
"pairing_points": pairing_points,
"pairing_images": pairing_images,
"batch_n_pairings": batch_n_pairings,
"inverse_indexes_group": inverse_indexes_group,
"inverse_indexes_merged": inverse_indexes_merged,
"sweepIds": sweepIds_group,
"sweep_pairing_group": sweep_pairing_group,
}
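# Illustrative sketch (never called): the collate function above shifts each sample's
# pairing indices by the number of points already stacked, so that they keep pointing
# at the right rows of the concatenated batch. Toy sizes are assumptions.
def _pairing_offset_sketch():
    points_per_sample = [4, 6]
    pairings = [torch.tensor([0, 2]), torch.tensor([1, 5])]
    shifted, offset = [], 0
    for n_points, pairing in zip(points_per_sample, pairings):
        shifted.append(pairing + offset)
        offset += n_points
    return torch.cat(shifted) # tensor([0, 2, 5, 9])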
class NuScenesMatchDataset(Dataset):
"""
Dataset matching a 3D points cloud and an image using projection.
"""
def __init__(
self,
phase,
config,
shuffle=False,
cloud_transforms=None,
mixed_transforms=None,
**kwargs,
):
self.phase = phase
self.shuffle = shuffle
self.cloud_transforms = cloud_transforms
self.mixed_transforms = mixed_transforms
self.voxel_size = config["voxel_size"]
self.cylinder = config["cylindrical_coordinates"]
self.superpixels_type = config["superpixels_type"]
self.bilinear_decoder = config["decoder"] == "bilinear"
self.config = config
self.dataroot = config['dataRoot_nuscenes']
if "cached_nuscenes" in kwargs:
self.nusc = kwargs["cached_nuscenes"]
else:
self.nusc = NuScenes(
version="v1.0-trainval", dataroot=self.dataroot, verbose=False
)
self.list_keyframes = []
# a skip ratio can be used to reduce the dataset size and accelerate experiments
try:
skip_ratio = config["dataset_skip_step"]
except KeyError:
skip_ratio = 1
skip_counter = 0
if phase in ("train", "val", "test"):
phase_scenes = create_splits_scenes()[phase]
elif phase == "parametrizing":
phase_scenes = list(
set(create_splits_scenes()["train"]) - set(CUSTOM_SPLIT)
)
elif phase == "verifying":
phase_scenes = CUSTOM_SPLIT
# create a list of camera & lidar scans
for scene_idx in range(len(self.nusc.scene)):
scene = self.nusc.scene[scene_idx]
if scene["name"] in phase_scenes:
skip_counter += 1
if skip_counter % skip_ratio == 0:
self.create_list_of_scans(scene)
with open('/nvme/konglingdong/youquan/nuscenes_infos_10sweeps_train.pkl', 'rb') as f:
self.sweeps_infos = pickle.load(f)
tem = {}
for info in self.sweeps_infos:
tem[info['lidar_path']] = {'sweeps': info['sweeps']}
self.sweeps_infos = tem
self.max_sweeps = self.config['max_sweeps']
print(phase)
print(len(phase_scenes))
def create_list_of_scans(self, scene):
# Get first and last keyframe in the scene
current_sample_token = scene["first_sample_token"]
# print("current_sample_token", current_sample_token)
# Loop to get all successive keyframes
list_data = []
while current_sample_token != "":
current_sample = self.nusc.get("sample", current_sample_token)
list_data.append(current_sample["data"])
current_sample_token = current_sample["next"]
# Add new scans in the list
self.list_keyframes.extend(list_data)
def get_sweep(self, sweep_info):
def remove_ego_points(points, center_radius=1.0):
mask = ~((np.abs(points[:, 0]) < center_radius) & (np.abs(points[:, 1]) < center_radius))
return points[mask]
lidar_name = sweep_info['lidar_path']
lidar_path = os.path.join(self.dataroot, lidar_name)
pc_original = LidarPointCloud.from_file(lidar_path)
points_sweep = pc_original.points.T[:, :4]
points_sweep = remove_ego_points(points_sweep).T
if sweep_info['transform_matrix'] is not None:
num_points = points_sweep.shape[1]
points_sweep[:3, :] = sweep_info['transform_matrix'].dot(
np.vstack((points_sweep[:3, :], np.ones(num_points))))[:3, :]
cur_times = sweep_info['time_lag'] * np.ones((1, points_sweep.shape[1]))
return points_sweep.T, cur_times.T
def get_lidar_with_sweeps(self, lidar_name, max_sweeps=1):
info = self.sweeps_infos[lidar_name]
lidar_path = os.path.join(self.nusc.dataroot, lidar_name)
pc_original = LidarPointCloud.from_file(lidar_path)
points = pc_original.points.T[:, :4]
name_list = [lidar_name]
sweep_points_list = [points]
for k in np.random.choice(len(info['sweeps']), max_sweeps - 1, replace=False):
points_sweep, times_sweep = self.get_sweep(info['sweeps'][k])
sweep_points_list.append(points_sweep)
name_list.append(info['sweeps'][k]['lidar_path'])
points = np.concatenate(sweep_points_list, axis=0)
return sweep_points_list, points
def map_pointcloud_to_image(self, point_merged, data, lidar_name, min_dist: float = 1.0, multi_sweeps=True):
"""
Given a lidar token and camera sample_data token, load pointcloud and map it to
the image plane. Code adapted from nuscenes-devkit
https://github.com/nutonomy/nuscenes-devkit.
:param min_dist: Distance from the camera below which points are discarded.
"""
pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
pc_original = LidarPointCloud.from_points(point_merged)
pc_ref = pc_original.points
images = []
pairing_points = np.empty(0, dtype=np.int64)
pairing_images = np.empty((0, 3), dtype=np.int64)
camera_list = [
"CAM_FRONT",
"CAM_FRONT_RIGHT",
"CAM_BACK_RIGHT",
"CAM_BACK",
"CAM_BACK_LEFT",
"CAM_FRONT_LEFT",
]
if self.shuffle:
np.random.shuffle(camera_list)
for i, camera_name in enumerate(camera_list):
pc = copy.deepcopy(pc_original)
cam = self.nusc.get("sample_data", data[camera_name])
im = np.array(Image.open(os.path.join(self.nusc.dataroot, cam["filename"])))
# Points live in the point sensor frame. So they need to be transformed via
# global to the image plane.
# First step: transform the pointcloud to the ego vehicle frame for the
# timestamp of the sweep.
cs_record = self.nusc.get(
"calibrated_sensor", pointsensor["calibrated_sensor_token"]
)
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix)
pc.translate(np.array(cs_record["translation"]))
# Second step: transform from ego to the global frame.
poserecord = self.nusc.get("ego_pose", pointsensor["ego_pose_token"])
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix)
pc.translate(np.array(poserecord["translation"]))
# Third step: transform from global into the ego vehicle frame for the
# timestamp of the image.
poserecord = self.nusc.get("ego_pose", cam["ego_pose_token"])
pc.translate(-np.array(poserecord["translation"]))
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix.T)
# Fourth step: transform from ego into the camera.
cs_record = self.nusc.get(
"calibrated_sensor", cam["calibrated_sensor_token"]
)
pc.translate(-np.array(cs_record["translation"]))
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix.T)
# Fifth step: actually take a "picture" of the point cloud.
# Grab the depths (camera frame z axis points away from the camera).
depths = pc.points[2, :]
# Take the actual picture
# (matrix multiplication with camera-matrix + renormalization).
points = view_points(
pc.points[:3, :],
np.array(cs_record["camera_intrinsic"]),
normalize=True,
)
# Remove points that are either outside or behind the camera.
# Also make sure points are at least 1m in front of the camera to avoid
# seeing the lidar points on the camera
# casing for non-keyframes which are slightly out of sync.
points = points[:2].T
mask = np.ones(depths.shape[0], dtype=bool)
mask = np.logical_and(mask, depths > min_dist)
mask = np.logical_and(mask, points[:, 0] > 0)
mask = np.logical_and(mask, points[:, 0] < im.shape[1] - 1)
mask = np.logical_and(mask, points[:, 1] > 0)
mask = np.logical_and(mask, points[:, 1] < im.shape[0] - 1)
matching_points = np.where(mask)[0]
matching_pixels = np.round(
np.flip(points[matching_points], axis=1)
).astype(np.int64)
images.append(im / 255)
pairing_points = np.concatenate((pairing_points, matching_points))
pairing_images = np.concatenate(
(
pairing_images,
np.concatenate(
(
np.ones((matching_pixels.shape[0], 1), dtype=np.int64) * i,
matching_pixels,
),
axis=1,
),
)
)
return pc_ref.T, images, pairing_points, pairing_images
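    # Illustrative sketch only (never called): the in-frustum filtering used above, on toy
    # camera-frame points with an assumed image size and intrinsic matrix.
    @staticmethod
    def _projection_mask_sketch():
        h, w = 900, 1600
        intrinsic = np.array([[1266.0, 0.0, 800.0],
                              [0.0, 1266.0, 450.0],
                              [0.0, 0.0, 1.0]])
        pts_cam = np.random.rand(3, 100) * 20 - 10 # points in the camera frame
        pts_cam[2] += 15.0 # push everything in front of the camera
        depths = pts_cam[2, :]
        uv = view_points(pts_cam, intrinsic, normalize=True)[:2].T
        mask = (depths > 1.0) & (uv[:, 0] > 0) & (uv[:, 0] < w - 1) \
            & (uv[:, 1] > 0) & (uv[:, 1] < h - 1)
        return np.where(mask)[0] # indices of the points visible in the image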
def __len__(self):
return len(self.list_keyframes)
def voxelizaton(self, pc):
if self.cylinder:
# Transform to cylinder coordinate and scale for voxel size
x, y, z = pc.T
rho = torch.sqrt(x ** 2 + y ** 2) / self.voxel_size
phi = torch.atan2(y, x) * 180 / np.pi # corresponds to a split each 1°
z = z / self.voxel_size
coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
else:
coords_aug = pc / self.voxel_size
discrete_coords, indexes, inverse_indexes = sparse_quantize(
coords_aug.contiguous().numpy(), return_index=True, return_inverse=True
)
discrete_coords, indexes, inverse_indexes = torch.from_numpy(discrete_coords), torch.from_numpy(indexes), torch.from_numpy(inverse_indexes)
return discrete_coords, indexes, inverse_indexes
def __getitem__(self, idx):
data = self.list_keyframes[idx]
pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
lidar_name = pointsensor["filename"]
sweep_points_list, point_merged = self.get_lidar_with_sweeps(lidar_name, max_sweeps=self.max_sweeps)
point_merged = torch.from_numpy(point_merged)
pc = point_merged[:, :3]
"""
# merged point cloud
"""
discrete_coords_merged, indexes_merged, inverse_indexes_merged = self.voxelizaton(pc)
"""
# sweep point cloud
"""
discrete_coords_group = []
inverse_indexes_group = []
unique_feats_group = []
sweepIds_group = []
pairing_points_group = []
images_group = []
pairing_images_group = []
sweep_pairing_group = []
t = 0
offset_points = 0
offset_inverse_indexes = 0
for sweep_id, sweep_points in enumerate(sweep_points_list):
(
pc,
images,
pairing_points,
pairing_images,
) = self.map_pointcloud_to_image(sweep_points, data, lidar_name, multi_sweeps=False)
intensity = torch.tensor(sweep_points[:, 3:])
pc = torch.tensor(sweep_points[:, :3])
images = torch.tensor(np.array(images, dtype=np.float32).transpose(0, 3, 1, 2))
if self.cloud_transforms:
pc = self.cloud_transforms(pc)
if self.mixed_transforms:
(
pc,
intensity,
images,
pairing_points,
pairing_images,
) = self.mixed_transforms(
pc, intensity, images, pairing_points, pairing_images
)
discrete_coords, indexes, inverse_indexes = self.voxelizaton(pc)
pairing_points_group.append(torch.from_numpy(pairing_points[:]) + offset_inverse_indexes)
pairing_images[:, 0] += sweep_id * 6
pairing_images_group.append(torch.from_numpy(pairing_images))
inverse_indexes_group.append(inverse_indexes[:] + offset_points)
discrete_coords_group.append(discrete_coords)
unique_feats_group.append(intensity[indexes])
images_group.append(images)
sweepIds_group.append(t * torch.ones(discrete_coords.shape[0]))
sweep_pairing_group.append(t * torch.ones(pairing_images.shape[0]))
offset_points += discrete_coords.shape[0]
offset_inverse_indexes += inverse_indexes.shape[0]
t += 1
discrete_coords_group = torch.cat(discrete_coords_group, dim=0)
inverse_indexes_group = torch.cat(inverse_indexes_group, dim=0)
pairing_images_group = torch.cat(pairing_images_group, dim=0)
unique_feats_group = torch.cat(unique_feats_group, dim=0)
sweepIds_group = torch.cat(sweepIds_group, dim=0)
sweep_pairing_group = torch.cat(sweep_pairing_group, dim=0)
pairing_points_group = torch.cat(pairing_points_group, dim=0)
images_group = torch.cat(images_group, dim=0)
assert pairing_points_group.shape[0] == pairing_images_group.shape[0]
assert pairing_points_group.shape[0] == sweep_pairing_group.shape[0]
assert discrete_coords_group.shape[0] == sweepIds_group.shape[0]
assert inverse_indexes_group.shape[0] == inverse_indexes_merged.shape[0]
discrete_coords_group = torch.cat(
(
discrete_coords_group,
torch.zeros(discrete_coords_group.shape[0], 1, dtype=torch.int32),
),
1,
)
return (
discrete_coords_group,
unique_feats_group,
images_group,
pairing_points_group,
pairing_images_group,
inverse_indexes_group,
inverse_indexes_merged,
sweepIds_group,
sweep_pairing_group,
)
| 18,090 | 39.113082 | 147 | py |
CLIP2Scene | CLIP2Scene-main/pretrain/dataloader_nuscenes_spconv.py | import os
import copy
import torch
import numpy as np
from PIL import Image
from pyquaternion import Quaternion
from torch.utils.data import Dataset
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import view_points
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import LidarPointCloud
from spconv.utils import VoxelGeneratorV2 as VoxelGenerator
CUSTOM_SPLIT = [
"scene-0008", "scene-0009", "scene-0019", "scene-0029", "scene-0032", "scene-0042",
"scene-0045", "scene-0049", "scene-0052", "scene-0054", "scene-0056", "scene-0066",
"scene-0067", "scene-0073", "scene-0131", "scene-0152", "scene-0166", "scene-0168",
"scene-0183", "scene-0190", "scene-0194", "scene-0208", "scene-0210", "scene-0211",
"scene-0241", "scene-0243", "scene-0248", "scene-0259", "scene-0260", "scene-0261",
"scene-0287", "scene-0292", "scene-0297", "scene-0305", "scene-0306", "scene-0350",
"scene-0352", "scene-0358", "scene-0361", "scene-0365", "scene-0368", "scene-0377",
"scene-0388", "scene-0391", "scene-0395", "scene-0413", "scene-0427", "scene-0428",
"scene-0438", "scene-0444", "scene-0452", "scene-0453", "scene-0459", "scene-0463",
"scene-0464", "scene-0475", "scene-0513", "scene-0533", "scene-0544", "scene-0575",
"scene-0587", "scene-0589", "scene-0642", "scene-0652", "scene-0658", "scene-0669",
"scene-0678", "scene-0687", "scene-0701", "scene-0703", "scene-0706", "scene-0710",
"scene-0715", "scene-0726", "scene-0735", "scene-0740", "scene-0758", "scene-0786",
"scene-0790", "scene-0804", "scene-0806", "scene-0847", "scene-0856", "scene-0868",
"scene-0882", "scene-0897", "scene-0899", "scene-0976", "scene-0996", "scene-1012",
"scene-1015", "scene-1016", "scene-1018", "scene-1020", "scene-1024", "scene-1044",
"scene-1058", "scene-1094", "scene-1098", "scene-1107",
]
def mean_vfe(voxel_features, voxel_num_points):
# voxel_features, voxel_num_points = batch_dict['voxels'], batch_dict['voxel_num_points']
points_mean = voxel_features[:, :, :].sum(dim=1, keepdim=False)
normalizer = torch.clamp_min(voxel_num_points.view(-1, 1), min=1.0).type_as(voxel_features)
points_mean = points_mean / normalizer
voxel_features = points_mean.contiguous()
return voxel_features
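# Illustrative sketch (never called): mean_vfe averages the raw points falling into each
# voxel, honouring the true per-voxel point count. Toy shapes and values are assumptions.
def _mean_vfe_sketch():
    voxels = torch.zeros(2, 3, 4) # 2 voxels, up to 3 points each, 4 features
    voxels[0, :2] = torch.tensor([[1.0, 1.0, 1.0, 1.0], [3.0, 3.0, 3.0, 3.0]])
    voxels[1, :1] = torch.tensor([[2.0, 2.0, 2.0, 2.0]])
    num_points = torch.tensor([2, 1])
    return mean_vfe(voxels, num_points) # both rows become [2., 2., 2., 2.]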
def spconv_collate_pair_fn(list_data):
"""
    Collate function adapted for creating batches of spconv voxel data.
"""
(
pc,
coords,
feats,
images,
pairing_points,
pairing_images,
num_points,
superpixels,
) = list(zip(*list_data))
batch_n_points, batch_n_pairings = [], []
pc_batch = []
offset = 0
for batch_id in range(len(pc)):
pc_batch.append(torch.cat((torch.ones((pc[batch_id].shape[0], 1)) * batch_id, pc[batch_id]), 1))
pairing_points[batch_id][:] += offset
offset += pc[batch_id].shape[0]
offset = 0
for batch_id in range(len(coords)):
# Move batchids to the beginning
coords[batch_id][:, 0] = batch_id
pairing_images[batch_id][:, 0] += batch_id * images[0].shape[0]
batch_n_points.append(coords[batch_id].shape[0])
batch_n_pairings.append(pairing_points[batch_id].shape[0])
offset += coords[batch_id].shape[0]
# Concatenate all lists
coords_batch = torch.cat(coords, 0).int()
pc_batch = torch.cat(pc_batch, 0)
pairing_points = torch.tensor(np.concatenate(pairing_points))
pairing_images = torch.tensor(np.concatenate(pairing_images))
feats_batch = torch.cat(feats, 0).float()
images_batch = torch.cat(images, 0).float()
superpixels_batch = torch.tensor(np.concatenate(superpixels))
num_points = torch.cat(num_points, 0)
feats_batch = mean_vfe(feats_batch, num_points)
return {
"pc": pc_batch,
"coordinates": coords_batch,
"voxels": feats_batch,
"input_I": images_batch,
"pairing_points": pairing_points,
"pairing_images": pairing_images,
"batch_n_pairings": batch_n_pairings,
"num_points": num_points,
"superpixels": superpixels_batch,
}
class NuScenesMatchDatasetSpconv(Dataset):
"""
Dataset matching a 3D points cloud and an image using projection.
"""
def __init__(
self,
phase,
config,
shuffle=False,
cloud_transforms=None,
mixed_transforms=None,
**kwargs,
):
self.phase = phase
self.shuffle = shuffle
self.cloud_transforms = cloud_transforms
self.mixed_transforms = mixed_transforms
if config["dataset"] == "nuscenes":
self.voxel_size = [0.1, 0.1, 0.2] # nuScenes
self.point_cloud_range = np.array([-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], dtype=np.float32) # nuScenes
MAX_POINTS_PER_VOXEL = 10 # nuScenes
MAX_NUMBER_OF_VOXELS = 60000 # nuScenes
self._voxel_generator = VoxelGenerator(
voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range,
max_num_points=MAX_POINTS_PER_VOXEL,
max_voxels=MAX_NUMBER_OF_VOXELS
)
else:
raise Exception("Dataset unknown")
self.superpixels_type = config["superpixels_type"]
self.bilinear_decoder = config["decoder"] == "bilinear"
self.num_point_features = 4
if "cached_nuscenes" in kwargs:
self.nusc = kwargs["cached_nuscenes"]
else:
self.nusc = NuScenes(
version="v1.0-trainval", dataroot="datasets/nuscenes", verbose=False
)
self.list_keyframes = []
# a skip ratio can be used to reduce the dataset size and accelerate experiments
try:
skip_ratio = config["dataset_skip_step"]
except KeyError:
skip_ratio = 1
skip_counter = 0
if phase in ("train", "val", "test"):
phase_scenes = create_splits_scenes()[phase]
elif phase == "parametrizing":
phase_scenes = list(
set(create_splits_scenes()["train"]) - set(CUSTOM_SPLIT)
)
elif phase == "verifying":
phase_scenes = CUSTOM_SPLIT
# create a list of camera & lidar scans
for scene_idx in range(len(self.nusc.scene)):
scene = self.nusc.scene[scene_idx]
if scene["name"] in phase_scenes:
skip_counter += 1
if skip_counter % skip_ratio == 0:
self.create_list_of_scans(scene)
def create_list_of_scans(self, scene):
# Get first and last keyframe in the scene
current_sample_token = scene["first_sample_token"]
# Loop to get all successive keyframes
list_data = []
while current_sample_token != "":
current_sample = self.nusc.get("sample", current_sample_token)
list_data.append(current_sample["data"])
current_sample_token = current_sample["next"]
# Add new scans in the list
self.list_keyframes.extend(list_data)
def map_pointcloud_to_image(self, data, min_dist: float = 1.0):
"""
Given a lidar token and camera sample_data token, load pointcloud and map it to
the image plane. Code adapted from nuscenes-devkit
https://github.com/nutonomy/nuscenes-devkit.
:param min_dist: Distance from the camera below which points are discarded.
"""
pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
pcl_path = os.path.join(self.nusc.dataroot, pointsensor["filename"])
pc_original = LidarPointCloud.from_file(pcl_path)
pc = pc_original.points
dist = pc[0] * pc[0] + pc[1] * pc[1]
mask = (dist <= 2621.44) & \
(pc[2] >= self.point_cloud_range[2]) & \
(pc[2] <= self.point_cloud_range[5])
pc_original = LidarPointCloud(pc[:, mask])
pc_ref = pc_original.points
images = []
superpixels = []
pairing_points = np.empty(0, dtype=np.int64)
pairing_images = np.empty((0, 3), dtype=np.int64)
camera_list = [
"CAM_FRONT",
"CAM_FRONT_RIGHT",
"CAM_BACK_RIGHT",
"CAM_BACK",
"CAM_BACK_LEFT",
"CAM_FRONT_LEFT",
]
if self.shuffle:
np.random.shuffle(camera_list)
for i, camera_name in enumerate(camera_list):
pc = copy.deepcopy(pc_original)
cam = self.nusc.get("sample_data", data[camera_name])
im = np.array(Image.open(os.path.join(self.nusc.dataroot, cam["filename"])))
sp = Image.open(
f"superpixels/nuscenes/"
f"superpixels_{self.superpixels_type}/{cam['token']}.png"
)
superpixels.append(np.array(sp))
# Points live in the point sensor frame. So they need to be transformed via
# global to the image plane.
# First step: transform the pointcloud to the ego vehicle frame for the
# timestamp of the sweep.
cs_record = self.nusc.get(
"calibrated_sensor", pointsensor["calibrated_sensor_token"]
)
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix)
pc.translate(np.array(cs_record["translation"]))
# Second step: transform from ego to the global frame.
poserecord = self.nusc.get("ego_pose", pointsensor["ego_pose_token"])
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix)
pc.translate(np.array(poserecord["translation"]))
# Third step: transform from global into the ego vehicle frame for the
# timestamp of the image.
poserecord = self.nusc.get("ego_pose", cam["ego_pose_token"])
pc.translate(-np.array(poserecord["translation"]))
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix.T)
# Fourth step: transform from ego into the camera.
cs_record = self.nusc.get(
"calibrated_sensor", cam["calibrated_sensor_token"]
)
pc.translate(-np.array(cs_record["translation"]))
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix.T)
# Fifth step: actually take a "picture" of the point cloud.
# Grab the depths (camera frame z axis points away from the camera).
depths = pc.points[2, :]
# Take the actual picture
# (matrix multiplication with camera-matrix + renormalization).
points = view_points(
pc.points[:3, :],
np.array(cs_record["camera_intrinsic"]),
normalize=True,
)
# Remove points that are either outside or behind the camera.
# Also make sure points are at least 1m in front of the camera to avoid
# seeing the lidar points on the camera
# casing for non-keyframes which are slightly out of sync.
points = points[:2].T
mask = np.ones(depths.shape[0], dtype=bool)
mask = np.logical_and(mask, depths > min_dist)
mask = np.logical_and(mask, points[:, 0] > 0)
mask = np.logical_and(mask, points[:, 0] < im.shape[1] - 1)
mask = np.logical_and(mask, points[:, 1] > 0)
mask = np.logical_and(mask, points[:, 1] < im.shape[0] - 1)
matching_points = np.where(mask)[0]
matching_pixels = np.round(
np.flip(points[matching_points], axis=1)
).astype(np.int64)
images.append(im / 255)
pairing_points = np.concatenate((pairing_points, matching_points))
pairing_images = np.concatenate(
(
pairing_images,
np.concatenate(
(
np.ones((matching_pixels.shape[0], 1), dtype=np.int64) * i,
matching_pixels,
),
axis=1,
),
)
)
return pc_ref.T, images, pairing_points, pairing_images, np.stack(superpixels)
def __len__(self):
return len(self.list_keyframes)
def _voxelize(self, points):
voxel_output = self._voxel_generator.generate(points.numpy())
voxels, coordinates, num_points = \
voxel_output['voxels'], voxel_output['coordinates'], voxel_output['num_points_per_voxel']
return voxels, coordinates, num_points
def __getitem__(self, idx):
(
pc,
images,
pairing_points,
pairing_images,
superpixels,
) = self.map_pointcloud_to_image(self.list_keyframes[idx])
superpixels = torch.tensor(superpixels)
intensity = torch.tensor(pc[:, 3:])
pc = torch.tensor(pc[:, :3])
images = torch.tensor(np.array(images, dtype=np.float32).transpose(0, 3, 1, 2))
if self.cloud_transforms:
pc = self.cloud_transforms(pc)
if self.mixed_transforms:
(
pc,
intensity,
images,
pairing_points,
pairing_images,
superpixels,
) = self.mixed_transforms(
pc, intensity, images, pairing_points, pairing_images, superpixels
)
pc = torch.cat((pc, intensity), 1)
voxels, coordinates, num_points = self._voxelize(pc)
discrete_coords = torch.cat(
(
torch.zeros(coordinates.shape[0], 1, dtype=torch.int32),
torch.tensor(coordinates),
),
1,
)
voxels = torch.tensor(voxels)
num_points = torch.tensor(num_points)
return (
pc,
discrete_coords,
voxels,
images,
pairing_points,
pairing_images,
num_points,
superpixels,
)
| 14,192 | 38.756303 | 114 | py |
CLIP2Scene | CLIP2Scene-main/pretrain/lightning_trainer_spconv.py | import os
import re
import torch
import numpy as np
import torch.optim as optim
import pytorch_lightning as pl
from pretrain.criterion import NCELoss
from pytorch_lightning.utilities import rank_zero_only
def bilinear_interpolate_torch(im, x, y):
"""
Args:
im: (H, W, C) [y, x]
x: (N)
y: (N)
    Returns:
        ans: (N, C) features bilinearly interpolated at (x, y)
"""
x0 = torch.floor(x).long()
x1 = x0 + 1
y0 = torch.floor(y).long()
y1 = y0 + 1
x0 = torch.clamp(x0, 0, im.shape[1] - 1)
x1 = torch.clamp(x1, 0, im.shape[1] - 1)
y0 = torch.clamp(y0, 0, im.shape[0] - 1)
y1 = torch.clamp(y1, 0, im.shape[0] - 1)
Ia = im[y0, x0]
Ib = im[y1, x0]
Ic = im[y0, x1]
Id = im[y1, x1]
wa = (x1.type_as(x) - x) * (y1.type_as(y) - y)
wb = (x1.type_as(x) - x) * (y - y0.type_as(y))
wc = (x - x0.type_as(x)) * (y1.type_as(y) - y)
wd = (x - x0.type_as(x)) * (y - y0.type_as(y))
ans = torch.t((torch.t(Ia) * wa)) + torch.t(torch.t(Ib) * wb) + torch.t(torch.t(Ic) * wc) + torch.t(torch.t(Id) * wd)
return ans
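# Illustrative sketch (never called): bilinear interpolation on a tiny feature map; the
# query sits halfway between the four pixels, so the result is their mean. Values are
# assumptions for illustration.
def _bilinear_interpolate_sketch():
    im = torch.arange(8, dtype=torch.float32).reshape(2, 2, 2) # (H=2, W=2, C=2)
    x = torch.tensor([0.5])
    y = torch.tensor([0.5])
    return bilinear_interpolate_torch(im, x, y) # tensor([[3., 4.]])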
def interpolate_from_bev_features(keypoints, bev_features, batch_size, bev_stride):
"""
Args:
keypoints: (N1 + N2 + ..., 4)
bev_features: (B, C, H, W)
batch_size:
bev_stride:
Returns:
point_bev_features: (N1 + N2 + ..., C)
"""
# voxel_size = [0.05, 0.05, 0.1] # KITTI
voxel_size = [0.1, 0.1, 0.2] # nuScenes
# point_cloud_range = np.array([0., -40., -3., 70.4, 40., 1.], dtype=np.float32) # KITTI
point_cloud_range = np.array([-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], dtype=np.float32) # nuScenes
x_idxs = (keypoints[:, 1] - point_cloud_range[0]) / voxel_size[0]
y_idxs = (keypoints[:, 2] - point_cloud_range[1]) / voxel_size[1]
x_idxs = x_idxs / bev_stride
y_idxs = y_idxs / bev_stride
point_bev_features_list = []
for k in range(batch_size):
bs_mask = (keypoints[:, 0] == k)
cur_x_idxs = x_idxs[bs_mask]
cur_y_idxs = y_idxs[bs_mask]
cur_bev_features = bev_features[k].permute(1, 2, 0) # (H, W, C)
point_bev_features = bilinear_interpolate_torch(cur_bev_features, cur_x_idxs, cur_y_idxs)
point_bev_features_list.append(point_bev_features)
point_bev_features = torch.cat(point_bev_features_list, dim=0) # (N1 + N2 + ..., C)
return point_bev_features
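# Illustrative sketch (never called): gathering per-point features from a BEV map for a
# single-sample batch. The shapes and the bev_stride below are assumed toy values.
def _interpolate_from_bev_sketch():
    keypoints = torch.zeros(5, 4) # (batch_idx, x, y, z) per keypoint
    keypoints[:, 1:3] = torch.rand(5, 2) * 100.0 - 50.0 # x, y inside the nuScenes range
    bev_features = torch.randn(1, 64, 128, 128) # (B, C, H, W)
    return interpolate_from_bev_features(keypoints, bev_features, 1, 8) # (5, 64)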
class LightningPretrainSpconv(pl.LightningModule):
def __init__(self, model_points, model_images, config):
super().__init__()
self.model_points = model_points
self.model_images = model_images
self._config = config
self.losses = config["losses"]
self.train_losses = []
self.val_losses = []
self.num_matches = config["num_matches"]
self.batch_size = config["batch_size"]
self.num_epochs = config["num_epochs"]
self.superpixel_size = config["superpixel_size"]
self.epoch = 0
if config["resume_path"] is not None:
self.epoch = int(
re.search(r"(?<=epoch=)[0-9]+", config["resume_path"])[0]
)
self.criterion = NCELoss(temperature=config["NCE_temperature"])
self.working_dir = os.path.join(config["working_dir"], config["datetime"])
if os.environ.get("LOCAL_RANK", 0) == 0:
os.makedirs(self.working_dir, exist_ok=True)
def configure_optimizers(self):
optimizer = optim.SGD(
list(self.model_points.parameters()) + list(self.model_images.parameters()),
lr=self._config["lr"],
momentum=self._config["sgd_momentum"],
dampening=self._config["sgd_dampening"],
weight_decay=self._config["weight_decay"],
)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, self.num_epochs)
return [optimizer], [scheduler]
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
optimizer.zero_grad(set_to_none=True)
def training_step(self, batch, batch_idx):
output_points = self.model_points(batch["voxels"], batch["coordinates"])
output_points = interpolate_from_bev_features(batch["pc"], output_points, self.batch_size, self.model_points.bev_stride)
self.model_images.eval()
self.model_images.decoder.train()
output_images = self.model_images(batch["input_I"])
del batch["voxels"]
del batch["coordinates"]
        # each loss is applied independently on each GPU
losses = [
getattr(self, loss)(batch, output_points, output_images)
for loss in self.losses
]
loss = torch.mean(torch.stack(losses))
torch.cuda.empty_cache()
self.log(
"train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True, batch_size=self.batch_size
)
self.train_losses.append(loss.detach().cpu())
return loss
def loss(self, batch, output_points, output_images):
pairing_points = batch["pairing_points"]
pairing_images = batch["pairing_images"]
idx = np.random.choice(pairing_points.shape[0], self.num_matches, replace=False)
k = output_points[pairing_points[idx]]
m = tuple(pairing_images[idx].T.long())
q = output_images.permute(0, 2, 3, 1)[m]
return self.criterion(k, q)
def loss_superpixels_average(self, batch, output_points, output_images):
# compute a superpoints to superpixels loss using superpixels
torch.cuda.empty_cache() # This method is extremely memory intensive
superpixels = batch["superpixels"]
pairing_images = batch["pairing_images"]
pairing_points = batch["pairing_points"]
superpixels = (
torch.arange(
0,
output_images.shape[0] * self.superpixel_size,
self.superpixel_size,
device=self.device,
)[:, None, None] + superpixels
)
m = tuple(pairing_images.cpu().T.long())
superpixels_I = superpixels.flatten()
idx_P = torch.arange(pairing_points.shape[0], device=superpixels.device)
total_pixels = superpixels_I.shape[0]
idx_I = torch.arange(total_pixels, device=superpixels.device)
with torch.no_grad():
one_hot_P = torch.sparse_coo_tensor(
torch.stack((
superpixels[m], idx_P
), dim=0),
torch.ones(pairing_points.shape[0], device=superpixels.device),
(superpixels.shape[0] * self.superpixel_size, pairing_points.shape[0])
)
one_hot_I = torch.sparse_coo_tensor(
torch.stack((
superpixels_I, idx_I
), dim=0),
torch.ones(total_pixels, device=superpixels.device),
(superpixels.shape[0] * self.superpixel_size, total_pixels)
)
k = one_hot_P @ output_points[pairing_points]
k = k / (torch.sparse.sum(one_hot_P, 1).to_dense()[:, None] + 1e-6)
q = one_hot_I @ output_images.permute(0, 2, 3, 1).flatten(0, 2)
q = q / (torch.sparse.sum(one_hot_I, 1).to_dense()[:, None] + 1e-6)
mask = torch.where(k[:, 0] != 0)
k = k[mask]
q = q[mask]
return self.criterion(k, q)
def training_epoch_end(self, outputs):
self.epoch += 1
if self.epoch == self.num_epochs:
self.save()
return super().training_epoch_end(outputs)
def validation_step(self, batch, batch_idx):
output_points = self.model_points(batch["voxels"], batch["coordinates"])
output_points = interpolate_from_bev_features(batch["pc"], output_points, self.batch_size, self.model_points.bev_stride)
self.model_images.eval()
output_images = self.model_images(batch["input_I"])
losses = [
getattr(self, loss)(batch, output_points, output_images)
for loss in self.losses
]
loss = torch.mean(torch.stack(losses))
self.val_losses.append(loss.detach().cpu())
self.log(
"val_loss", loss, on_epoch=True, prog_bar=True, logger=True, sync_dist=True, batch_size=self.batch_size
)
return loss
@rank_zero_only
def save(self):
path = os.path.join(self.working_dir, "model.pt")
torch.save(
{
"model_points": self.model_points.state_dict(),
"model_images": self.model_images.state_dict(),
"epoch": self.epoch,
"config": self._config,
},
path,
)
| 8,642 | 35.778723 | 128 | py |
CLIP2Scene | CLIP2Scene-main/pretrain/pc_utils.py | """ Utility functions for processing point clouds.
Author: Charles R. Qi, Hao Su
Date: November 2016
"""
import os
import sys
import warnings
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Draw point cloud
from eulerangles import euler2mat
import math
# Point cloud IO
import numpy as np
from plyfile import PlyData, PlyElement
import torch
import random
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
""" Input is BxNx3 batch of point cloud
Output is Bx(vsize^3)
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)
if flatten:
vol_list.append(vol.flatten())
else:
vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
if flatten:
return np.vstack(vol_list)
else:
return np.concatenate(vol_list, 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
""" input is Nx3 points.
output is vsize*vsize*vsize
assumes points are in range [-radius, radius]
"""
vol = np.zeros((vsize,vsize,vsize))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0
return vol
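# Illustrative sketch (never called): voxelize a random point cloud and recover the
# occupied voxel indices with volume_to_point_cloud below. Sizes are assumptions.
def _volume_roundtrip_sketch():
    pts = np.random.rand(1024, 3) * 1.6 - 0.8 # points inside [-0.8, 0.8]^3
    vol = point_cloud_to_volume(pts, vsize=12, radius=1.0) # (12, 12, 12) occupancy grid
    occupied = volume_to_point_cloud(vol) # Nx3 indices of the occupied voxels
    return vol, occupied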
#a = np.zeros((16,1024,3))
#print point_cloud_to_volume_batch(a, 12, 1.0, False).shape
def volume_to_point_cloud(vol):
""" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
return Nx3 numpy array.
"""
vsize = vol.shape[0]
    assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
points = []
for a in range(vsize):
for b in range(vsize):
for c in range(vsize):
if vol[a,b,c] == 1:
points.append(np.array([a,b,c]))
if len(points) == 0:
return np.zeros((0,3))
points = np.vstack(points)
return points
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxVxVxVxnum_samplex3
Added on Feb 19
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume_v2(point_clouds[b,:,:], vsize, radius, num_sample)
vol_list.append(np.expand_dims(vol, 0))
return np.concatenate(vol_list, 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is vsize*vsize*vsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each voxel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
vol = np.zeros((vsize,vsize,vsize,num_sample,3))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
#print loc2pc
for i in range(vsize):
for j in range(vsize):
for k in range(vsize):
if (i,j,k) not in loc2pc:
vol[i,j,k,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j,k)] # a list of (3,) arrays
pc = np.vstack(pc) # kx3
# Sample/pad to num_sample points
if pc.shape[0]>num_sample:
choices = np.random.choice(pc.shape[0], num_sample, replace=False)
pc = pc[choices,:]
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
# Normalize
pc_center = (np.array([i,j,k])+0.5)*voxel - radius
#print 'pc center: ', pc_center
pc = (pc - pc_center) / voxel # shift and scale
vol[i,j,k,:,:] = pc
#print (i,j,k), vol[i,j,k,:,:]
return vol
def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxIxIxnum_samplex3
Added on Feb 19
"""
img_list = []
for b in range(point_clouds.shape[0]):
img = point_cloud_to_image(point_clouds[b,:,:], imgsize, radius, num_sample)
img_list.append(np.expand_dims(img, 0))
return np.concatenate(img_list, 0)
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is imgsize*imgsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each pixel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
img = np.zeros((imgsize, imgsize, num_sample, 3))
pixel = 2*radius/float(imgsize)
locations = (points[:,0:2] + radius)/pixel # Nx2
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
for i in range(imgsize):
for j in range(imgsize):
if (i,j) not in loc2pc:
img[i,j,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j)]
pc = np.vstack(pc)
if pc.shape[0]>num_sample:
choices = np.random.choice(pc.shape[0], num_sample, replace=False)
pc = pc[choices,:]
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
pc_center = (np.array([i,j])+0.5)*pixel - radius
pc[:,0:2] = (pc[:,0:2] - pc_center)/pixel
img[i,j,:,:] = pc
return img
def surface_normal_area(face, vertex):
normals = list()
areas = list()
vertex_to_face = [[] for i in range(len(vertex))]
for fid, f in enumerate(face):
f = f[0]
va, vb, vc = f[0], f[1], f[2]
vertex_to_face[va].append(fid)
vertex_to_face[vb].append(fid)
vertex_to_face[vc].append(fid)
a = vertex[vb] - vertex[va]
b = vertex[vc] - vertex[va]
normal = np.cross(a, b)
area = np.dot(normal, normal) / 2.0
normalized_normal = normal / np.linalg.norm(normal)
normals.append(normalized_normal)
areas.append(area)
return np.array(normals), np.array(areas), vertex_to_face
def vertex_normal(vertex_to_face, normal, areas):
vertex_normals = list()
num_vertex = len(vertex_to_face)
for vid in range(num_vertex):
adj_faces = vertex_to_face[vid]
        if len(adj_faces)==0: # single point with no adjacent faces
vertex_normals.append([0,0,1])
continue
adj_faces_area = np.expand_dims(np.array(areas[adj_faces]), axis=-1)
adj_faces_normal = np.array(normal[adj_faces])
avg_normal = (adj_faces_normal * adj_faces_area) / np.sum(adj_faces_area)
avg_normal = np.sum(avg_normal, axis=0)
normalized_normal = avg_normal / np.linalg.norm(avg_normal)
#if np.isclose(np.linalg.norm(avg_normal), 0.0):
# print('-------------------')
# print(len(adj_faces))
# print('-------------------')
# print('-------------------')
# print(adj_faces_area.shape, adj_faces_normal.shape, adj_faces_area, adj_faces_normal)
# print(adj_faces_normal * adj_faces_area)
# print(np.sum(adj_faces_area))
# print((adj_faces_normal * adj_faces_area) / np.sum(adj_faces_area))
# print(avg_normal, np.linalg.norm(avg_normal), adj_faces_area, adj_faces_normal)
# print('-------------------')
vertex_normals.append(normalized_normal)
return np.array(vertex_normals)
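# Illustrative sketch (never called): face and vertex normals of a single triangle, using
# the same plyfile-style face records the two functions above expect.
def _normals_sketch():
    vertex = np.array([[0.0, 0.0, 0.0],
                       [1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]])
    face = [(np.array([0, 1, 2]),)] # one triangle, nested like plydata['face'].data
    f_n, f_a, v_f = surface_normal_area(face, vertex)
    v_n = vertex_normal(v_f, f_n, f_a)
    return f_n, f_a, v_n # every normal is (0, 0, 1); the face area is 0.5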
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
def read_ply_rgba(filename):
""" read XYZRGBA point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z,r,g,b,a] for x,y,z,r,g,b,a in pc])
return pc_array
def read_ply_rgba_normal(filename):
""" read XYZRGBA and NxNyNz point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z,r,g,b,a] for x,y,z,r,g,b,a in pc])
face = plydata['face'].data
f_n, f_a, v_f = surface_normal_area(face, pc_array[:, 0:3])
v_n = vertex_normal(v_f, f_n, f_a)
pc_array = np.concatenate((pc_array, v_n), axis=-1)
return pc_array
def write_ply(points, filename, text=True):
""" input: Nx3, write points to filename as PLY format. """
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def write_ply_rgb(points, colors, filename, text=True):
""" input: Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
colors = [(colors[i,0], colors[i,1], colors[i,2]) for i in range(colors.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'),('blue', 'u1')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def write_ply_rgb_normal(points, colors, normals, filename, text=True):
""" input: Nx3, Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
colors = [(colors[i,0], colors[i,1], colors[i,2]) for i in range(colors.shape[0])]
normals = [(normals[i,0], normals[i,1], normals[i,2]) for i in range(normals.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'),('blue', 'u1')])
normal = np.array(normals, dtype=[('nx', 'f4'), ('ny', 'f4'),('nz', 'f4')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr + normal.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
for prop in normal.dtype.names:
vertex_all[prop] = normal[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,
xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True):
""" Render point cloud to image with alpha channel.
Input:
points: Nx3 numpy array (+y is up direction)
Output:
gray image as numpy array of size canvasSizexcanvasSize
"""
image = np.zeros((canvasSize, canvasSize))
if input_points is None or input_points.shape[0] == 0:
return image
points = input_points[:, switch_xyz]
M = euler2mat(zrot, yrot, xrot)
points = (np.dot(M, points.transpose())).transpose()
# Normalize the point cloud
# We normalize scale to fit points in a unit sphere
if normalize:
centroid = np.mean(points, axis=0)
points -= centroid
furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1)))
points /= furthest_distance
# Pre-compute the Gaussian disk
radius = (diameter-1)/2.0
disk = np.zeros((diameter, diameter))
for i in range(diameter):
for j in range(diameter):
if (i - radius) * (i-radius) + (j-radius) * (j-radius) <= radius * radius:
disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2))
mask = np.argwhere(disk > 0)
dx = mask[:, 0]
dy = mask[:, 1]
dv = disk[disk > 0]
# Order points by z-buffer
zorder = np.argsort(points[:, 2])
points = points[zorder, :]
points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2])))
max_depth = np.max(points[:, 2])
for i in range(points.shape[0]):
j = points.shape[0] - i - 1
x = points[j, 0]
y = points[j, 1]
xc = canvasSize/2 + (x*space)
yc = canvasSize/2 + (y*space)
xc = int(np.round(xc))
yc = int(np.round(yc))
px = dx + xc
py = dy + yc
image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3
image = image / np.max(image)
return image
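# draw_point_cloud renders a cloud by rotating it with euler2mat, optionally
# normalizing it into the unit sphere, sorting points along z and splatting a
# precomputed Gaussian disk at each projected 2D location, with the splat intensity
# scaled by (max_depth - z); the accumulated canvas is finally normalized to [0, 1].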
def point_cloud_three_views(points):
""" input points Nx3 numpy array (+y is up direction).
        return a numpy array gray image of size 500x1500. """
# +y is up direction
# xrot is azimuth
# yrot is in-plane
# zrot is elevation
img1 = draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi)
img2 = draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi)
img3 = draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi)
image_large = np.concatenate([img1, img2, img3], 1)
return image_large
def point_cloud_three_views_demo():
""" Demo for draw_point_cloud function """
from PIL import Image
points = read_ply('../third_party/mesh_sampling/piano.ply')
im_array = point_cloud_three_views(points)
img = Image.fromarray(np.uint8(im_array*255.0))
img.save('piano.jpg')
if __name__=="__main__":
point_cloud_three_views_demo()
def pyplot_draw_point_cloud(points, output_filename):
""" points is a Nx3 numpy array """
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0], points[:,1], points[:,2])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
#savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
""" vol is of size vsize*vsize*vsize
output an image to output_filename
"""
points = volume_to_point_cloud(vol)
pyplot_draw_point_cloud(points, output_filename)
def write_ply_color(points, labels, out_filename, num_classes=None, colors=None):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
import matplotlib.pyplot as pyplot
labels = labels.astype(int)
N = points.shape[0]
if num_classes is None:
num_classes = np.max(labels)+1
print(num_classes)
else:
assert(num_classes>np.max(labels))
    if colors is None:
        #colors = [pyplot.cm.hsv(i/float(num_classes)) for i in range(num_classes)]
        colors = [pyplot.cm.jet(i/float(num_classes)) for i in range(num_classes)]
        # the colormap returns RGBA floats in [0, 1]; scale to 0-255 so the integer
        # formatting below does not truncate every channel to zero
        colors = [[int(x * 255) for x in c[:3]] for c in colors]
fout = open(out_filename, 'w')
for i in range(N):
c = colors[labels[i]]
fout.write('v %f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))
fout.close()
def farthest_pts_sampling_abuse(pts, num_samples):
'''
naive method
:param pts: n x 3 ndarray
:param num_samples:
:return: num_samples x 3 ndarray
'''
diff = pts[:, None, :] - pts[None, :, :]
# dis_mat = np.sum(diff * diff, axis=2)
dis_mat = np.linalg.norm(diff, axis=2)
N = num_samples
perm = np.zeros(N, dtype=np.int64)
lambdas = np.zeros(N)
ds = dis_mat[0, :]
for i in range(1, N):
idx = np.argmax(ds)
perm[i] = idx
lambdas[i] = ds[idx]
ds = np.minimum(ds, dis_mat[idx, :])
return pts[perm, :]
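# Note: farthest_pts_sampling_abuse materializes the full n x n pairwise distance
# matrix, so memory grows quadratically with the number of points; it is only
# practical for small clouds. Minimal usage sketch (random input, for illustration):
#   pts = np.random.rand(1024, 3)
#   sampled = farthest_pts_sampling_abuse(pts, 64)  # -> (64, 3) ndarray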
def farthest_pts_sampling(coords, num_samples):
    '''
    naive farthest point sampling
    :param coords: n x 3 tensor
    :param num_samples:
    :return: num_samples x 3 ndarray
    '''
    pts = coords.numpy()
    point_set = []
    # start from a random seed point
    index = random.randint(0, pts.shape[0] - 1)
    point_set.append(pts[index].copy())
    # distance from every point to the current sample set
    min_dist = np.linalg.norm(pts - pts[index][None, :], axis=1)
    for i in range(1, num_samples):
        # greedily pick the point farthest from the already selected set
        index = np.argmax(min_dist)
        point_set.append(pts[index].copy())
        min_dist = np.minimum(min_dist, np.linalg.norm(pts - pts[index][None, :], axis=1))
    point_set = np.vstack(point_set)
    return point_set
def random_partition(coords):
# print('1')
mask = torch.ones(coords.size()[0]).numpy()
coords_np = coords.numpy()
sample_num = random.randint(2, 5)
random_index = np.random.randint(coords_np.shape[0], size=sample_num)
sample_points = coords_np[random_index, :]
diff = coords_np[:, None, :] - sample_points[None, :, :]
diff = np.linalg.norm(diff, axis=2)
partitions = np.argmin(diff, axis=1)
filter_ind = random.randint(0, sample_num - 1)
# coords_torch = torch.from_numpy(coords_np[partitions != filter_ind])
coords_torch = coords
mask[partitions == filter_ind] = 0
mask = torch.from_numpy(mask)
# print('4')
# part1 = torch.from_numpy(coords_np[partitions == filter_ind])
# part2 = torch.from_numpy(coords_np[partitions != filter_ind])
return coords_torch, mask
# return part1, part2
def random_rotation(coords):
# scale = torch.eye(3)*random.uniform(0.95, 1.05)
scale_flip = np.eye(3) + np.random.randn(3, 3) * 0.1
scale_flip[0][0] *= np.random.randint(0, 2) * 2 - 1
scale_flip = torch.from_numpy(scale_flip).float()
# scale = torch.eye(3)
theta = random.uniform(0, 2) * math.pi
rotationx = torch.tensor([[math.cos(theta), math.sin(theta), 0],
[-math.sin(theta), math.cos(theta), 0],
[0, 0, 1]]).float()
# rotationy = torch.tensor([[math.cos(theta), 0, math.sin(theta)],
# [0, 1, 0],
# [math.sin(theta), 0, -math.cos(theta)]]).float()
#
# rotationz = torch.tensor([[1, 0, 0],
# [0, math.cos(theta), math.sin(theta)],
# [0, -math.sin(theta), math.cos(theta)]]).float()
m = torch.matmul(scale_flip, rotationx)
coords = torch.matmul(coords.float(), m)
return coords
# def random_rotation(coords):
# return coords
def resize_rotation(coords, item):
scale = 0
if item == 'chair':
scale = torch.eye(3) * 0.8
elif item == 'sofa':
scale = torch.eye(3) * 1.75
elif item == 'table':
scale = torch.eye(3) * 1.65
elif item == 'bookshelf':
scale = torch.eye(3) * 1.7
elif item == 'desk':
scale = torch.eye(3) * 1.25
elif item == 'bed':
scale = torch.eye(3) * 2.1
elif item == 'sink':
scale = torch.eye(3) * 1.05
elif item == 'bathtub':
scale = torch.eye(3) * 1.25
elif item == 'toilet':
scale = torch.eye(3) * 0.65
elif item == 'door':
scale = torch.eye(3) * 1.8
elif item == 'curtain':
scale = torch.eye(3) * 2
else :
scale = torch.eye(3) * random.uniform(0.9, 1.75)
'''
if item == 'chair':
scale = torch.eye(3) * random.uniform(5, 5.5)
elif item == 'bed':
scale = torch.eye(3) * random.uniform(1.4, 1.6)
elif item == 'sofa':
scale = torch.eye(3) * random.uniform(9, 9.5)
elif item == 'table':
scale = torch.eye(3) * random.uniform(8, 8.5)
elif item == 'bookshelf':
scale = torch.eye(3) * random.uniform(1.1, 1.2)
elif item == 'desk':
scale = torch.eye(3) * random.uniform(7, 7.5)
elif item == 'nega_data':
scale = torch.eye(3) * random.uniform(5, 8)
'''
# theta = 0 * math.pi
# rotationx = torch.tensor([[math.cos(theta), math.sin(theta), 0],
# [-math.sin(theta), math.cos(theta), 0],
# [0, 0, 1]]).float()
#
# rotationy = torch.tensor([[math.cos(theta), 0, math.sin(theta)],
# [0, 1, 0],
# [math.sin(theta), 0, -math.cos(theta)]]).float()
# rotationz = torch.tensor([[1, 0, 0],
# [0, math.cos(theta), math.sin(theta)],
# [0, -math.sin(theta), math.cos(theta)]]).float()
# m = torch.matmul(scale, rotationz)
m = scale
coords = torch.matmul(coords.float(), m)
return coords | 21,734 | 35.529412 | 104 | py |
CLIP2Scene | CLIP2Scene-main/pretrain/criterion.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class NCELoss(nn.Module):
"""
Compute the PointInfoNCE loss
"""
def __init__(self, temperature):
super(NCELoss, self).__init__()
self.temperature = temperature
self.criterion = nn.CrossEntropyLoss()
def forward(self, k, q):
logits = torch.mm(k, q.transpose(1, 0))
# print(logits)
target = torch.arange(k.shape[0], device=k.device).long()
out = torch.div(logits, self.temperature)
out = out.contiguous()
loss = self.criterion(out, target)
return loss
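# Minimal usage sketch for NCELoss (batch size, feature dimension and temperature
# below are illustrative assumptions, not values from the original training code):
# matching rows of k and q form the positive pairs, all other rows act as negatives.
def _nce_loss_example():
    k = F.normalize(torch.randn(8, 64), dim=1)
    q = F.normalize(torch.randn(8, 64), dim=1)
    criterion = NCELoss(temperature=0.07)
    return criterion(k, q)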
class semantic_NCELoss(nn.Module):
"""
Compute the PointInfoNCE loss
"""
def __init__(self, temperature):
super(semantic_NCELoss, self).__init__()
self.temperature = temperature
self.criterion = nn.CrossEntropyLoss()
def forward(self, k, q, pseudo_label):
logits = torch.mm(k, q.transpose(1, 0))
# print(logits)
target = torch.arange(k.shape[0], device=k.device).long()
logits = torch.div(logits, self.temperature)
# out = out.contiguous()
permute = pseudo_label.unsqueeze(-1).repeat(1, pseudo_label.shape[0])
mask = permute == permute.permute(1, 0)
mask_diag = torch.diag_embed(torch.Tensor([True] * pseudo_label.shape[0])).to(k.device).bool()
mask = mask & (~mask_diag)
logits[mask] = 0
logits_sparse = logits.to_sparse()
logits_sparse = torch.sparse.log_softmax(logits_sparse, dim=1).to_dense()
# d_sparse = d.to_sparse()
# torch.sparse.log_softmax(d_sparse, dim=0)
# torch.sparse.log_softmax(d_sparse, dim=1).to_dense()
# import pdb
# pdb.set_trace()
loss = F.nll_loss(logits_sparse, target)
# loss = self.criterion(out, target)
return loss
class DistillKL(nn.Module):
"""Distilling the Knowledge in a Neural Network"""
def __init__(self, T):
super(DistillKL, self).__init__()
self.T = T
def forward(self, y_s, y_t):
p_s = F.log_softmax(y_s/self.T, dim=1)
p_t = F.softmax(y_t/self.T, dim=1)
loss = F.kl_div(p_s, p_t, size_average=False) * (self.T**2) / y_s.shape[0]
return loss
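# DistillKL is the standard knowledge-distillation objective: the KL divergence
# KL(softmax(y_t / T) || softmax(y_s / T)), summed over classes, averaged over the
# batch and scaled by T^2 so gradient magnitudes stay comparable across temperatures.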
eps = 1e-7
class CRDLoss(nn.Module):
"""CRD Loss function
includes two symmetric parts:
(a) using teacher as anchor, choose positive and negatives over the student side
(b) using student as anchor, choose positive and negatives over the teacher side
Args:
opt.s_dim: the dimension of student's feature
opt.t_dim: the dimension of teacher's feature
opt.feat_dim: the dimension of the projection space
opt.nce_k: number of negatives paired with each positive
opt.nce_t: the temperature
opt.nce_m: the momentum for updating the memory buffer
        opt.n_data: the number of samples in the training set, therefore the memory buffer is: opt.n_data x opt.feat_dim
"""
def __init__(self, opt):
super(CRDLoss, self).__init__()
self.embed_s = Embed(opt.s_dim, opt.feat_dim)
self.embed_t = Embed(opt.t_dim, opt.feat_dim)
self.contrast = ContrastMemory(opt.feat_dim, opt.n_data, opt.nce_k, opt.nce_t, opt.nce_m)
self.criterion_t = ContrastLoss(opt.n_data)
self.criterion_s = ContrastLoss(opt.n_data)
def forward(self, f_s, f_t, idx, contrast_idx=None):
"""
Args:
f_s: the feature of student network, size [batch_size, s_dim]
f_t: the feature of teacher network, size [batch_size, t_dim]
idx: the indices of these positive samples in the dataset, size [batch_size]
contrast_idx: the indices of negative samples, size [batch_size, nce_k]
Returns:
The contrastive loss
"""
f_s = self.embed_s(f_s)
f_t = self.embed_t(f_t)
out_s, out_t = self.contrast(f_s, f_t, idx, contrast_idx)
s_loss = self.criterion_s(out_s)
t_loss = self.criterion_t(out_t)
loss = s_loss + t_loss
return loss
class ContrastLoss(nn.Module):
"""
contrastive loss, corresponding to Eq (18)
"""
def __init__(self, n_data):
super(ContrastLoss, self).__init__()
self.n_data = n_data
def forward(self, x):
bsz = x.shape[0]
m = x.size(1) - 1
# noise distribution
Pn = 1 / float(self.n_data)
# loss for positive pair
P_pos = x.select(1, 0)
log_D1 = torch.div(P_pos, P_pos.add(m * Pn + eps)).log_()
# loss for K negative pair
P_neg = x.narrow(1, 1, m)
log_D0 = torch.div(P_neg.clone().fill_(m * Pn), P_neg.add(m * Pn + eps)).log_()
loss = - (log_D1.sum(0) + log_D0.view(-1, 1).sum(0)) / bsz
return loss
class Embed(nn.Module):
"""Embedding module"""
def __init__(self, dim_in=1024, dim_out=128):
super(Embed, self).__init__()
self.linear = nn.Linear(dim_in, dim_out)
self.l2norm = Normalize(2)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.linear(x)
x = self.l2norm(x)
return x
class Normalize(nn.Module):
"""normalization layer"""
def __init__(self, power=2):
super(Normalize, self).__init__()
self.power = power
def forward(self, x):
norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)
out = x.div(norm)
return out
class ContrastMemory(nn.Module):
"""
memory buffer that supplies large amount of negative samples.
"""
def __init__(self, inputSize, outputSize, K, T=0.07, momentum=0.5):
super(ContrastMemory, self).__init__()
self.nLem = outputSize
self.unigrams = torch.ones(self.nLem)
self.multinomial = AliasMethod(self.unigrams)
self.multinomial.cuda()
self.K = K
self.register_buffer('params', torch.tensor([K, T, -1, -1, momentum]))
stdv = 1. / math.sqrt(inputSize / 3)
self.register_buffer('memory_v1', torch.rand(outputSize, inputSize).mul_(2 * stdv).add_(-stdv))
self.register_buffer('memory_v2', torch.rand(outputSize, inputSize).mul_(2 * stdv).add_(-stdv))
def forward(self, v1, v2, y, idx=None):
K = int(self.params[0].item())
T = self.params[1].item()
Z_v1 = self.params[2].item()
Z_v2 = self.params[3].item()
momentum = self.params[4].item()
batchSize = v1.size(0)
outputSize = self.memory_v1.size(0)
inputSize = self.memory_v1.size(1)
# original score computation
if idx is None:
idx = self.multinomial.draw(batchSize * (self.K + 1)).view(batchSize, -1)
idx.select(1, 0).copy_(y.data)
# sample
weight_v1 = torch.index_select(self.memory_v1, 0, idx.view(-1)).detach()
weight_v1 = weight_v1.view(batchSize, K + 1, inputSize)
out_v2 = torch.bmm(weight_v1, v2.view(batchSize, inputSize, 1))
out_v2 = torch.exp(torch.div(out_v2, T))
# sample
weight_v2 = torch.index_select(self.memory_v2, 0, idx.view(-1)).detach()
weight_v2 = weight_v2.view(batchSize, K + 1, inputSize)
out_v1 = torch.bmm(weight_v2, v1.view(batchSize, inputSize, 1))
out_v1 = torch.exp(torch.div(out_v1, T))
# set Z if haven't been set yet
if Z_v1 < 0:
self.params[2] = out_v1.mean() * outputSize
Z_v1 = self.params[2].clone().detach().item()
print("normalization constant Z_v1 is set to {:.1f}".format(Z_v1))
if Z_v2 < 0:
self.params[3] = out_v2.mean() * outputSize
Z_v2 = self.params[3].clone().detach().item()
print("normalization constant Z_v2 is set to {:.1f}".format(Z_v2))
# compute out_v1, out_v2
out_v1 = torch.div(out_v1, Z_v1).contiguous()
out_v2 = torch.div(out_v2, Z_v2).contiguous()
# update memory
with torch.no_grad():
l_pos = torch.index_select(self.memory_v1, 0, y.view(-1))
l_pos.mul_(momentum)
l_pos.add_(torch.mul(v1, 1 - momentum))
l_norm = l_pos.pow(2).sum(1, keepdim=True).pow(0.5)
updated_v1 = l_pos.div(l_norm)
self.memory_v1.index_copy_(0, y, updated_v1)
ab_pos = torch.index_select(self.memory_v2, 0, y.view(-1))
ab_pos.mul_(momentum)
ab_pos.add_(torch.mul(v2, 1 - momentum))
ab_norm = ab_pos.pow(2).sum(1, keepdim=True).pow(0.5)
updated_v2 = ab_pos.div(ab_norm)
self.memory_v2.index_copy_(0, y, updated_v2)
return out_v1, out_v2
class AliasMethod(object):
"""
From: https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
"""
def __init__(self, probs):
if probs.sum() > 1:
probs.div_(probs.sum())
K = len(probs)
self.prob = torch.zeros(K)
self.alias = torch.LongTensor([0]*K)
# Sort the data into the outcomes with probabilities
# that are larger and smaller than 1/K.
smaller = []
larger = []
for kk, prob in enumerate(probs):
self.prob[kk] = K*prob
if self.prob[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
# Loop though and create little binary mixtures that
# appropriately allocate the larger outcomes over the
# overall uniform mixture.
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
self.alias[small] = large
self.prob[large] = (self.prob[large] - 1.0) + self.prob[small]
if self.prob[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
for last_one in smaller+larger:
self.prob[last_one] = 1
def cuda(self):
self.prob = self.prob.cuda()
self.alias = self.alias.cuda()
def draw(self, N):
""" Draw N samples from multinomial """
K = self.alias.size(0)
kk = torch.zeros(N, dtype=torch.long, device=self.prob.device).random_(0, K)
prob = self.prob.index_select(0, kk)
alias = self.alias.index_select(0, kk)
# b is whether a random number is greater than q
b = torch.bernoulli(prob)
oq = kk.mul(b.long())
oj = alias.mul((1-b).long())
return oq + oj | 10,649 | 33.690554 | 120 | py |
CLIP2Scene | CLIP2Scene-main/pretrain/dataloader_kitti.py |
import os
import re
import torch
import numpy as np
from torch.utils.data import Dataset
# from MinkowskiEngine.utils import sparse_quantize
from utils.transforms import make_transforms_clouds
from torchsparse import SparseTensor
from torchsparse.utils.collate import sparse_collate_fn
from torchsparse.utils.quantize import sparse_quantize
import cv2
import copy
TRAIN_SET = {0, 1, 2, 3, 4, 5, 6, 7, 9, 10}
VALIDATION_SET = {8}
TEST_SET = {11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
def kitti_collate_pair_fn(list_data):
"""
Collate function adapted for creating batches with MinkowskiEngine.
"""
(
coords,
feats,
images,
pairing_points,
pairing_images,
inverse_indexes,
) = list(zip(*list_data))
batch_n_points, batch_n_pairings = [], []
offset = 0
for batch_id in range(len(coords)):
# Move batchids to the beginning
coords[batch_id][:, -1] = batch_id
pairing_points[batch_id][:] += offset
pairing_images[batch_id][:, 0] += batch_id * images[0].shape[0]
batch_n_points.append(coords[batch_id].shape[0])
batch_n_pairings.append(pairing_points[batch_id].shape[0])
offset += coords[batch_id].shape[0]
# Concatenate all lists
coords_batch = torch.cat(coords, 0).int()
# print(coords_batch.size())
pairing_points = torch.tensor(np.concatenate(pairing_points))
pairing_images = torch.tensor(np.concatenate(pairing_images))
feats_batch = torch.cat(feats, 0).float()
images_batch = torch.cat(images, 0).float()
return {
"sinput_C": coords_batch,
"sinput_F": feats_batch,
"input_I": images_batch,
"pairing_points": pairing_points,
"pairing_images": pairing_images,
"batch_n_pairings": batch_n_pairings,
"inverse_indexes": inverse_indexes,
}
class KittiMatchDataset(Dataset):
"""
Dataset returning a lidar scene and associated labels.
    Note that the superpixels functionality has been removed.
"""
def __init__(
self,
phase,
config,
shuffle=False,
cloud_transforms=None,
mixed_transforms=None,
**kwargs,
):
self.phase = phase
self.shuffle = shuffle
self.cloud_transforms = cloud_transforms
self.mixed_transforms = mixed_transforms
self.voxel_size = config["voxel_size"]
self.cylinder = config["cylindrical_coordinates"]
self.superpixels_type = config["superpixels_type"]
self.bilinear_decoder = config["decoder"] == "bilinear"
# a skip ratio can be used to reduce the dataset size
# and accelerate experiments
skip_ratio = config["dataset_skip_step"]
if phase in ("train", "parametrizing"):
phase_set = TRAIN_SET
elif phase in ("val", "verifying"):
phase_set = VALIDATION_SET
elif phase == "test":
phase_set = TEST_SET
self.list_files = []
for num in phase_set:
directory = next(
os.walk(
f"/mnt/lustre/share_data/liuyouquan/semantickitti/sequences/{num:0>2d}/velodyne"
)
)
self.list_files.extend(
map(
lambda x: f"/mnt/lustre/share_data/liuyouquan/semantickitti/sequences/"
f"{num:0>2d}/velodyne/" + x,
directory[2],
)
)
self.list_files = sorted(self.list_files)[::skip_ratio]
# labels' names lookup table
self.eval_labels = {
0: 0, 1: 0, 10: 1, 11: 2, 13: 5, 15: 3, 16: 5, 18: 4, 20: 5, 30: 6, 31: 7,
32: 8, 40: 9, 44: 10, 48: 11, 49: 12, 50: 13, 51: 14, 52: 0, 60: 9, 70: 15,
71: 16, 72: 17, 80: 18, 81: 19, 99: 0, 252: 1, 253: 7, 254: 6, 255: 8,
256: 5, 257: 5, 258: 4, 259: 5,
}
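        # eval_labels maps the raw SemanticKITTI label ids (including the moving-object
        # ids 252-259) onto the 19 training classes, with 0 reserved as the ignore index.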
def select_points_in_frustum(self, points_2d, x1, y1, x2, y2):
"""
Select points in a 2D frustum parametrized by x1, y1, x2, y2 in image coordinates
:param points_2d: point cloud projected into 2D
:param points_3d: point cloud
:param x1: left bound
:param y1: upper bound
:param x2: right bound
:param y2: lower bound
:return: points (2D and 3D) that are in the frustum
"""
keep_ind = (points_2d[:, 0] > x1) * \
(points_2d[:, 1] > y1) * \
(points_2d[:, 0] < x2) * \
(points_2d[:, 1] < y2)
return keep_ind
def read_calib(self, calib_path):
"""
:param calib_path: Path to a calibration text file.
:return: dict with calibration matrices.
"""
calib_all = {}
with open(calib_path, 'r') as f:
for line in f.readlines():
if line == '\n':
break
key, value = line.split(':', 1)
calib_all[key] = np.array([float(x) for x in value.split()])
# reshape matrices
calib_out = {}
calib_out['P2'] = calib_all['P2'].reshape(3, 4) # 3x4 projection matrix for left camera
calib_out['Tr'] = np.identity(4) # 4x4 matrix
calib_out['Tr'][:3, :4] = calib_all['Tr'].reshape(3, 4)
return calib_out
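    # The projection below composes the calibration matrices as proj = P2 @ Tr
    # (velodyne frame -> left color camera -> pixels); for a homogeneous point
    # [x, y, z, 1], the pixel is (u, v) = (p0 / p2, p1 / p2) with p = proj @ [x, y, z, 1].
    # Points behind the sensor (x <= 0) or outside the 1241 x 376 image are dropped.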
def map_pointcloud_to_image(self, ann_info, min_dist: float = 1.0):
"""
        Given the path to a SemanticKITTI velodyne scan, load the point cloud and map it to
        the image plane of the left color camera (image_2). Code adapted from nuscenes-devkit
        https://github.com/nutonomy/nuscenes-devkit.
        :param min_dist: Distance from the camera below which points are discarded (unused here).
"""
# pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
points = np.fromfile(ann_info, dtype=np.float32).reshape((-1, 4))
pc_ref = copy.deepcopy(points)
path_splits = ann_info.split('/')
calib_path = os.path.join("/mnt/lustre/share_data/liuyouquan/semantickitti/sequences",path_splits[-3], "calib.txt")
image_path = os.path.join("/mnt/lustre/share_data/chenrunnan/dataset/sequences/",path_splits[-3],"image_2", path_splits[-1].replace("bin", "png"))
image = cv2.imread(image_path)
image = cv2.resize(image, (1241, 376), interpolation=cv2.INTER_LINEAR)
calib = self.read_calib(calib_path)
proj_matrix = calib['P2'] @ calib['Tr']
proj_matrix = proj_matrix.astype(np.float32)
# project points into image
keep_idx = points[:, 0] > 0 # only keep point in front of the vehicle
points_hcoords = np.concatenate([points[:, :3], np.ones([len(points), 1], dtype=np.float32)], axis=1)
img_points = (proj_matrix @ points_hcoords.T).T
matching_pixel = img_points[:, :2] / np.expand_dims(img_points[:, 2], axis=1) # scale 2D points
# print(img_points)
keep_idx_img_pts = self.select_points_in_frustum(matching_pixel, 0, 0, 1241, 376)
# print(keep_idx)
keep_idx = keep_idx_img_pts & keep_idx
# print(sum(keep_idx))
# print("+"*90)
matching_pixel = matching_pixel[keep_idx]
# cv2.namedWindow('win', cv2.WINDOW_NORMAL)
# for i in range(len(matching_pixel)):
# cv2.circle(image, (int(matching_pixel[i][0]), int(matching_pixel[i][1])), 1, (255, 255, 0), -1)
# cv2.imwrite('./vis.png',image)
# points_h = points[keep_idx]
pairing_points = np.where(keep_idx==True)[0]
pairing_images = np.concatenate(
(
np.zeros((matching_pixel.shape[0], 1), dtype=np.int64),
matching_pixel,
),
axis=1,
)
assert pairing_images.shape[1] == 3
images = [image / 255]
return pc_ref, images, pairing_points, pairing_images
def __len__(self):
return len(self.list_files)
def __getitem__(self, idx):
lidar_file = self.list_files[idx]
(
pc,
images,
pairing_points,
pairing_images,
) = self.map_pointcloud_to_image(lidar_file)
# points = np.fromfile(lidar_file, dtype=np.float32).reshape((-1, 4))
# get the points (4th coordinate is the point intensity)
intensity = torch.tensor(pc[:, 3:] + 1.)
pc = torch.tensor(pc[:, :3])
# print("pairing_points size: ", pairing_points.shape)
# print("pairing_images size: ", pairing_images.shape)
# print("images size: ", images[0].shape)
# print("pc size: ", pc.shape)
# images size: (900, 1600, 3)
# pc size: torch.Size([34688, 3])
# pairing_points size: (22585,)
# pairing_images size: (22585, 3)
images = torch.tensor(np.array(images, dtype=np.float32).transpose(0, 3, 1, 2))
# apply the transforms (augmentation)
if self.cloud_transforms:
pc = self.cloud_transforms(pc)
if self.mixed_transforms:
(
pc,
intensity,
images,
pairing_points,
pairing_images,
) = self.mixed_transforms(
pc, intensity, images, pairing_points, pairing_images
)
if self.cylinder:
# Transform to cylinder coordinate and scale for voxel size
x, y, z = pc.T
rho = torch.sqrt(x ** 2 + y ** 2) / self.voxel_size
# corresponds to a split each 1°
phi = torch.atan2(y, x) * 180 / np.pi
z = z / self.voxel_size
coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
else:
coords_aug = pc / self.voxel_size
# Voxelization
# discrete_coords, indexes, inverse_indexes = sparse_quantize(
# coords_aug, return_index=True, return_inverse=True
# )
discrete_coords, indexes, inverse_indexes = sparse_quantize(coords_aug.numpy(),
return_index=True,
return_inverse=True)
discrete_coords, indexes, inverse_indexes = torch.from_numpy(discrete_coords), torch.from_numpy(indexes), torch.from_numpy(inverse_indexes)
# indexes here are the indexes of points kept after the voxelization
pairing_points = inverse_indexes[pairing_points]
unique_feats = intensity[indexes]
discrete_coords = torch.cat(
(
discrete_coords,
torch.zeros(discrete_coords.shape[0], 1, dtype=torch.int32),
),
1,
)
return (
discrete_coords,
unique_feats,
images,
pairing_points,
pairing_images,
inverse_indexes,
)
| 10,972 | 34.282958 | 154 | py |
CLIP2Scene | CLIP2Scene-main/downstream/dataloader_scannet.py | import os
import copy
import torch
import numpy as np
from PIL import Image
import MinkowskiEngine as ME
from torch.utils.data import Dataset
# import pc_utils
from plyfile import PlyData, PlyElement
import math
# from pc_utils import write_ply_rgb
import sys
sys.path.append("..")
# from MinkowskiEngine.utils import sparse_quantize
import imageio
import cv2
import random
def write_ply_rgb(points, colors, filename, text=True):
""" input: Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i, 0], points[i, 1], points[i, 2]) for i in range(points.shape[0])]
colors = [(colors[i, 0], colors[i, 1], colors[i, 2]) for i in range(colors.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def scannet_collate_pair_fn(batch):
(
pc,
coords,
feats,
unique_labels,
labels,
inverse_indexes,
scan_names,
) = list(zip(*batch))
len_batch = []
for batch_id, coo in enumerate(coords):
N = coords[batch_id].shape[0]
len_batch.append(N)
coords = ME.utils.batched_coordinates(coords, dtype=torch.float32)
feats = torch.cat(feats, dim=0)
# imgs = torch.cat(imgs, dim=0)
unique_labels = torch.cat(unique_labels, 0).long()
return {
"pc": pc, # point cloud
"sinput_C": coords, # discrete coordinates (ME)
"sinput_F": feats, # point features (N, 3)
# "input_I": imgs,
"len_batch": len_batch,
"labels": unique_labels,
"evaluation_labels": labels, # labels for each point
"inverse_indexes": inverse_indexes, # labels for each point
"lidar_name": scan_names
}
class scannet_Dataset(Dataset):
def __init__(self, phase, config, transforms=None):
self.scannet_root_dir = config['dataRoot_scannet']
if phase == 'train':
self.scannet_file_list = self.read_files(config['train_file'])
skip_ratio = config["dataset_skip_step"]
print("before: ", len(self.scannet_file_list))
self.scannet_file_list = sorted(self.scannet_file_list)[::skip_ratio]
print("after: ", len(self.scannet_file_list))
else:
self.scannet_file_list = self.read_files(config['val_file'])
self.voxel_size = config['voxel_size']
self.phase = phase
self.config = config
self.imageDim = (640, 480)
self.transforms = transforms
self.maxImages = 8
def read_files(self, file):
f = open(file)
lines = f.readlines()
name_list = [line.split('.')[0] for line in lines]
f.close()
return name_list
def __len__(self):
return len(self.scannet_file_list)
def read_pose_file(self, fname):
posemat = np.asarray([[float(x[0]), float(x[1]), float(x[2]), float(x[3])] for x in
(x.split(" ") for x in open(fname).read().splitlines())])
return posemat
def read_intrinsic_file(self, fname):
intrinsic = np.asarray([[float(x[0]), float(x[1]), float(x[2]), float(x[3])] for x in
(x.split(" ") for x in open(fname).read().splitlines())])
return intrinsic
def read_txt(self, path):
# Read txt file into lines.
with open(path) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
return lines
def computeLinking(self, camera_to_world, coords, depth, link_proj_threshold, intrinsic_color, intrinsic_depth, imageDim):
"""
:param camera_to_world: 4 x 4
:param coords: N x 3 format
:param depth: H x W format
:intrinsic_depth: 4 x 4
:intrinsic_color: 4 x 4, not used currently
:return: linking, N x 3 format, (H,W,mask)
"""
# print("imageDim ", imageDim)
intrinsic = intrinsic_depth
link = np.zeros((3, coords.shape[0]), dtype=float)
coordsNew = np.concatenate([coords, np.ones([coords.shape[0], 1])], axis=1).T #4 x N
assert coordsNew.shape[0] == 4, "[!] Shape error"
world_to_camera = np.linalg.inv(camera_to_world) # 4 x 4
p = np.matmul(world_to_camera, coordsNew) # 4 x N
p[0] = (p[0] * intrinsic[0][0]) / p[2] + intrinsic[0][2]
p[1] = (p[1] * intrinsic[1][1]) / p[2] + intrinsic[1][2]
pi = p
inside_mask = (pi[0] >= 0) * (pi[1] >= 0) * (pi[0] <= imageDim[1] - 1) * (pi[1] <= imageDim[0]-1)
        occlusion_mask = np.abs(depth[np.round(pi[1][inside_mask]).astype(int), np.round(pi[0][inside_mask]).astype(int)] - p[2][inside_mask]) <= link_proj_threshold
inside_mask[inside_mask == True] = occlusion_mask
link[0][inside_mask] = pi[1][inside_mask]
link[1][inside_mask] = pi[0][inside_mask]
link[2][inside_mask] = 1
return link.T
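    # Each row of the returned link array is (image row, image column, valid flag);
    # valid is 1 only for points that project inside the depth image and whose depth
    # matches the rendered depth map within link_proj_threshold (occlusion test).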
def __getitem__(self, idx):
# _new_semantic.npy: 0~19, .npy: 1~20
path = os.path.join(self.scannet_root_dir, self.scannet_file_list[idx], self.scannet_file_list[idx]+"_new_semantic.npy")
# path = os.path.join(self.scannet_root_dir, self.file_list[idx], self.file_list[idx]+".npy")
data = torch.from_numpy(np.load(path))
coords, feats, labels = data[:, :3], data[:, 3: 6], data[:, -1]
labels[labels == -100] = -1
labels += 1
pc = coords.clone()
# coords, labels = data[:, :3], data[:, 9:]
# sceneName = self.scannet_file_list[idx]
# write_ply_rgb(coords, feats, "visual/visual_%s.ply" % sceneName)
feats = feats / 127.5 - 1
coords = (coords - coords.mean(0)) / self.voxel_size
# print(feats)
# feats = torch.ones(len(coords), 1)
# frame_names = []
# imgs = []
# links = []
#
# intrinsic_color = self.read_intrinsic_file(os.path.join(self.config['dataRoot_images'], sceneName, 'intrinsics_color.txt'))
# intrinsic_depth = self.read_intrinsic_file(os.path.join(self.config['dataRoot_images'], sceneName, 'intrinsics_depth.txt'))
#
# for framename in os.listdir(os.path.join(self.config['dataRoot_images'], sceneName, 'color')):
# frame_names.append(framename.split('.')[0])
#
# pairing_points = []
# pairing_images = []
#
# frame_names = random.sample(frame_names, min(self.maxImages, len(frame_names)))
#
# for i, frameid in enumerate(frame_names):
# f = os.path.join(self.config['dataRoot_images'], sceneName, 'color', frameid + '.jpg')
# img = imageio.imread(f) / 255
# # print("before ", img.shape)
# img = cv2.resize(img, self.imageDim)
# # print("after ", img.shape)
# # images.append(im / 255)
# depth = imageio.imread(f.replace('color', 'depth').replace('.jpg', '.png')) / 1000.0 # convert to meter
# posePath = f.replace('color', 'pose').replace('.jpg', '.txt')
# pose = self.read_pose_file(posePath)
#
# # ply_filename = os.path.join('%s_vh_clean_2.ply' % (sceneName))
# # label_filename = os.path.join('%s_vh_clean_2.labels.ply' % (sceneName))
#
# # print("depth", depth.shape)
# # print("img", img.shape)
#
# # link = np.ones([coords.shape[0], 3])
# link = self.computeLinking(pose, coords, depth, 0.05, intrinsic_color, intrinsic_depth, depth.shape)
#
# pairing_point = torch.from_numpy(np.argwhere(link[:, 2] == 1)).squeeze()
# pairing_points.append(pairing_point)
#
# link = torch.from_numpy(link).int()
# # link_index = link[:, 2] == 1
#
# imgs.append(torch.from_numpy(img.transpose((2, 0, 1))))
#
# pairing_image = link[pairing_point, :2]
# pairing_images.append(torch.cat((torch.ones(pairing_point.shape[0], 1) * i,
# pairing_image), dim=1))
'''
# print image-point correspondence
img_pixel = tuple(pairing_image.T.long())
img_RGB = img[img_pixel]
print(coords[pairing_point].shape, "img_RGB ", img_RGB.shape)
write_ply_rgb(coords[pairing_point], img_RGB*255, "visual/visual_%s_%s.ply" % (frameid, i))
'''
# imgs = torch.stack(imgs)
# pairing_points = torch.cat(pairing_points, dim=0)
# pairing_images = torch.cat(pairing_images, dim=0)
if self.transforms:
coords = self.transforms(coords.float())
discrete_coords, indexes, inverse_indexes = ME.utils.sparse_quantize(
coords.contiguous(), return_index=True, return_inverse=True
)
# indexes here are the indexes of points kept after the voxelization
# pairing_points = inverse_indexes[pairing_points]
unique_labels = labels[indexes]
feats = feats[indexes]
# assert pairing_points.shape[0] == pairing_images.shape[0]
packages = (pc, discrete_coords, feats, unique_labels, labels, inverse_indexes, self.scannet_file_list[idx])
return packages
def make_data_loader(config, phase, num_threads=0):
"""
Create the data loader for a given phase and a number of threads.
This function is not used with pytorch lightning, but is used when evaluating.
"""
# select the desired transformations
if phase == "train":
transforms = make_transforms_clouds(config)
else:
transforms = None
# instantiate the dataset
dset = scannet_Dataset(phase=phase, transforms=transforms, config=config)
collate_fn = scannet_collate_pair_fn
batch_size = config["batch_size"] // config["num_gpus"]
# create the loader
loader = torch.utils.data.DataLoader(
dset,
batch_size=batch_size,
shuffle=phase == "train",
num_workers=num_threads,
collate_fn=collate_fn,
pin_memory=False,
drop_last=phase == "train",
worker_init_fn=lambda id: np.random.seed(torch.initial_seed() // 2 ** 32 + id),
)
return loader
| 10,704 | 35.660959 | 171 | py |
CLIP2Scene | CLIP2Scene-main/downstream/lightning_datamodule.py | import torch
import numpy as np
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from utils.transforms import make_transforms_clouds
from downstream.dataloader_kitti import SemanticKITTIDataset
from downstream.dataloader_nuscenes import NuScenesDataset, custom_collate_fn
from downstream.dataloader_scannet import scannet_Dataset, scannet_collate_pair_fn
class DownstreamDataModule(pl.LightningDataModule):
"""
The equivalent of a DataLoader for pytorch lightning.
"""
def __init__(self, config):
super().__init__()
self.config = config
# in multi-GPU the actual batch size is that
self.batch_size = config["batch_size"] // config["num_gpus"]
# the CPU workers are split across GPU
self.num_workers = max(config["num_threads"] // config["num_gpus"], 1)
def setup(self, stage):
# setup the dataloader: this function is automatically called by lightning
transforms = make_transforms_clouds(self.config)
if self.config["dataset"].lower() == "nuscenes":
Dataset = NuScenesDataset
elif self.config["dataset"].lower() == "scannet":
Dataset = scannet_Dataset
elif self.config["dataset"].lower() in ("kitti", "semantickitti"):
Dataset = SemanticKITTIDataset
else:
raise Exception(f"Unknown dataset {self.config['dataset']}")
if self.config["training"] in ("parametrize", "parametrizing"):
phase_train = "parametrizing"
phase_val = "verifying"
else:
phase_train = "train"
phase_val = "val"
self.train_dataset = Dataset(
phase=phase_train, transforms=transforms, config=self.config
)
if Dataset == NuScenesDataset:
self.val_dataset = Dataset(
phase=phase_val,
config=self.config,
cached_nuscenes=self.train_dataset.nusc,
)
else:
self.val_dataset = Dataset(phase=phase_val, config=self.config)
def train_dataloader(self):
if self.config["num_gpus"]:
num_workers = self.config["num_threads"] // self.config["num_gpus"]
else:
num_workers = self.config["num_threads"]
if self.config["dataset"].lower() == "nuscenes":
default_collate_pair_fn = minkunet_collate_pair_fn
elif self.config["dataset"].lower() == "kitti":
default_collate_pair_fn = kitti_collate_pair_fn
elif self.config["dataset"].lower() == "scannet":
default_collate_pair_fn = scannet_collate_pair_fn
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=num_workers,
collate_fn=default_collate_pair_fn,
pin_memory=True,
drop_last=True,
worker_init_fn=lambda id: np.random.seed(
torch.initial_seed() // 2 ** 32 + id
),
)
def val_dataloader(self):
if self.config["num_gpus"]:
num_workers = self.config["num_threads"] // self.config["num_gpus"]
else:
num_workers = self.config["num_threads"]
if self.config["dataset"].lower() == "nuscenes":
default_collate_pair_fn = minkunet_collate_pair_fn
elif self.config["dataset"].lower() == "kitti":
default_collate_pair_fn = kitti_collate_pair_fn
elif self.config["dataset"].lower() == "scannet":
default_collate_pair_fn = scannet_collate_pair_fn
return DataLoader(
self.val_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=num_workers,
collate_fn=default_collate_pair_fn,
pin_memory=True,
drop_last=False,
worker_init_fn=lambda id: np.random.seed(
torch.initial_seed() // 2 ** 32 + id
),
)
#
# def train_dataloader(self):
# # construct the training dataloader: this function is automatically called
# # by lightning
# return DataLoader(
# self.train_dataset,
# batch_size=self.batch_size,
# shuffle=True,
# num_workers=self.num_workers,
# collate_fn=custom_collate_fn,
# pin_memory=True,
# drop_last=False,
# worker_init_fn=lambda id: np.random.seed(
# torch.initial_seed() // 2 ** 32 + id
# ),
# )
#
# def val_dataloader(self):
# # construct the validation dataloader: this function is automatically called
# # by lightning
# return DataLoader(
# self.val_dataset,
# batch_size=self.batch_size,
# shuffle=False,
# num_workers=self.num_workers,
# collate_fn=custom_collate_fn,
# pin_memory=True,
# drop_last=False,
# worker_init_fn=lambda id: np.random.seed(
# torch.initial_seed() // 2 ** 32 + id
# ),
# )
| 5,158 | 35.85 | 86 | py |
CLIP2Scene | CLIP2Scene-main/downstream/evaluate.py | import numpy as np
import torch
from tqdm import tqdm
from copy import deepcopy
from MinkowskiEngine import SparseTensor
# from torchsparse import SparseTensor
from utils.metrics import compute_IoU
CLASSES_NUSCENES = [
"barrier",
"bicycle",
"bus",
"car",
"construction_vehicle",
"motorcycle",
"pedestrian",
"traffic_cone",
"trailer",
"truck",
"driveable_surface",
"other_flat",
"sidewalk",
"terrain",
"manmade",
"vegetation",
]
CLASSES_KITTI = [
"car",
"bicycle",
"motorcycle",
"truck",
"other-vehicle",
"person",
"bicyclist",
"motorcyclist",
"road",
"parking",
"sidewalk",
"other-ground",
"building",
"fence",
"vegetation",
"trunk",
"terrain",
"pole",
"traffic-sign",
]
CLASSES_scannet = [
'wall',
'floor',
'cabinet',
'bed',
'chair',
'sofa',
'table',
'door',
'window',
'bookshelf',
'picture',
'counter',
'desk',
'curtain',
'refrigerator',
'shower curtain',
'toilet',
'sink',
'bathtub',
'other furniture'
]
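# evaluate() runs the model over the whole dataloader: voxel-level predictions are
# scattered back to every input point through the per-scan inverse_indexes,
# concatenated over the split, and scored with a single confusion matrix
# (per-class IoU, mIoU and frequency-weighted IoU), with index 0 ignored.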
def evaluate(model, dataloader, config):
"""
Function to evaluate the performances of a downstream training.
It prints the per-class IoU, mIoU and fwIoU.
"""
model.eval()
with torch.no_grad():
i = 0
full_predictions = []
ground_truth = []
for batch in tqdm(dataloader):
lidar_names = batch["lidar_name"]
sparse_input = SparseTensor(batch["sinput_F"].float(), batch["sinput_C"].int(), device=0)
# print(sparse_input, model)
output_points = model(sparse_input)
# for spvcnn
# sparse_input = SparseTensor(batch["sinput_F"], batch["sinput_C"])
# output_points = model(sparse_input.to(0))
if config["ignore_index"]:
output_points[:, config["ignore_index"]] = -1e6
torch.cuda.empty_cache()
preds = output_points.argmax(1).cpu()
offset = 0
# print(output_points)
# print(batch["evaluation_labels"][0].max())
# print(batch["evaluation_labels"][0].min())
for j, lb in enumerate(batch["len_batch"]):
# print(batch["len_batch"], j)
inverse_indexes = batch["inverse_indexes"][j]
predictions = preds[inverse_indexes + offset]
# print(predictions.shape, batch["evaluation_labels"][j].shape)
# remove the ignored index entirely
full_predictions.append(predictions)
ground_truth.append(deepcopy(batch["evaluation_labels"][j]))
offset += lb
# m_IoU, fw_IoU, per_class_IoU = compute_IoU(
# torch.cat([predictions]),
# torch.cat([deepcopy(batch["evaluation_labels"][j])]),
# config["model_n_out"],
# ignore_index=0,
# )
'''
class_ind = 4
lidar_name = lidar_names[j].split('/')[-1]
root_path = '/mnt/lustre/chenrunnan/projects/SLidR/visual/annotation_free/'
# lidar_name_path = root_path + str(per_class_IoU[class_ind]) + lidar_name
lidar_name_path = root_path + lidar_name
save_file = predictions.unsqueeze(-1).numpy()
# save_file = np.expand_dims(predictions)
# if per_class_IoU[class_ind] != 1 and per_class_IoU[class_ind] > 0.4:
np.array(save_file).astype(np.uint8).tofile(lidar_name_path)
'''
# import pdb
# pdb.set_trace()
i += j
full_predictions = torch.cat(full_predictions).int()
ground_truth = torch.cat(ground_truth).int()
# if config["dataset"].lower() == "scannet":
# ground_truth += 1
# ground_truth[ground_truth == -99] = 0
# print(full_predictions.shape, torch.cat(ground_truth).shape)
# print(torch.cat(full_predictions), torch.cat(ground_truth))
print(ground_truth)
m_IoU, fw_IoU, per_class_IoU = compute_IoU(
full_predictions,
ground_truth,
config["model_n_out"],
ignore_index=0,
)
# import pdb
# pdb.set_trace()
print("Per class IoU:")
if config["dataset"].lower() == "nuscenes":
print(
*[
f"{a:20} - {b:.3f}"
for a, b in zip(CLASSES_NUSCENES, (per_class_IoU).numpy())
],
sep="\n",
)
elif config["dataset"].lower() == "kitti":
print(
*[
f"{a:20} - {b:.3f}"
for a, b in zip(CLASSES_KITTI, (per_class_IoU).numpy())
],
sep="\n",
)
elif config["dataset"].lower() == "scannet":
print(
*[
f"{a:20} - {b:.3f}"
for a, b in zip(CLASSES_scannet, (per_class_IoU).numpy())
],
sep="\n",
)
print()
print(f"mIoU: {m_IoU}")
print(f"fwIoU: {fw_IoU}")
return m_IoU
| 5,359 | 26.628866 | 101 | py |
CLIP2Scene | CLIP2Scene-main/downstream/lightning_trainer.py | import os
import torch
import torch.optim as optim
import pytorch_lightning as pl
from MinkowskiEngine import SparseTensor
# from torchsparse import SparseTensor
from downstream.criterion import DownstreamLoss, unknown_aware_infoNCE
from pytorch_lightning.utilities import rank_zero_only
from utils.metrics import confusion_matrix, compute_IoU_from_cmatrix
import MinkowskiEngine as ME
class LightningDownstream(pl.LightningModule):
def __init__(self, model, config):
super().__init__()
self.model = model
self.best_mIoU = 0.0
self.metrics = {"val mIoU": [], "val_loss": [], "train_loss": []}
self._config = config
self.train_losses = []
self.val_losses = []
self.ignore_index = config["ignore_index"]
self.n_classes = config["model_n_out"]
self.num_epochs = config["num_epochs"]
self.epoch = 0
if config["loss"].lower() == "lovasz":
self.criterion = DownstreamLoss(
ignore_index=config["ignore_index"],
device=self.device,
)
else:
self.criterion = torch.nn.CrossEntropyLoss(
ignore_index=config["ignore_index"],
)
self.mode = config["mode"]
# if self.mode == 'source_free':
# self.num_epochs = 0
if self.mode == 'zero_shot':
self.criterion = unknown_aware_infoNCE(ignore_index=config["ignore_index"], config=config)
self.working_dir = os.path.join(config["working_dir"], config["datetime"])
if os.environ.get("LOCAL_RANK", 0) == 0:
os.makedirs(self.working_dir, exist_ok=True)
def configure_optimizers(self):
if self._config.get("lr_head", None) is not None:
print("Use different learning rates between the head and trunk.")
def is_final_head(key):
return key.find('final.') != -1
param_group_head = [
param for key, param in self.model.named_parameters()
if param.requires_grad and is_final_head(key)]
param_group_trunk = [
param for key, param in self.model.named_parameters()
if param.requires_grad and (not is_final_head(key))]
param_group_all = [
param for key, param in self.model.named_parameters()
if param.requires_grad]
assert len(param_group_all) == (len(param_group_head) + len(param_group_trunk))
weight_decay = self._config["weight_decay"]
weight_decay_head = self._config["weight_decay_head"] if (self._config["weight_decay_head"] is not None) else weight_decay
parameters = [
{"params": iter(param_group_head), "lr": self._config["lr_head"], "weight_decay": weight_decay_head},
{"params": iter(param_group_trunk)}]
print(f"==> Head: #{len(param_group_head)} params with learning rate: {self._config['lr_head']} and weight_decay: {weight_decay_head}")
print(f"==> Trunk: #{len(param_group_trunk)} params with learning rate: {self._config['lr']} and weight_decay: {weight_decay}")
optimizer = optim.SGD(
parameters,
lr=self._config["lr"],
momentum=self._config["sgd_momentum"],
dampening=self._config["sgd_dampening"],
weight_decay=self._config["weight_decay"],
)
else:
if self._config.get("optimizer") and self._config["optimizer"] == 'adam':
print('Optimizer: AdamW')
optimizer = optim.AdamW(
self.model.parameters(),
lr=self._config["lr"],
weight_decay=self._config["weight_decay"],
)
else:
print('Optimizer: SGD')
optimizer = optim.SGD(
self.model.parameters(),
lr=self._config["lr"],
momentum=self._config["sgd_momentum"],
dampening=self._config["sgd_dampening"],
weight_decay=self._config["weight_decay"],
)
if self._config.get("scheduler") and self._config["scheduler"] == 'steplr':
print('Scheduler: StepLR')
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, int(.9 * self._config["num_epochs"]),
)
else:
print('Scheduler: Cosine')
scheduler = optim.lr_scheduler.CosineAnnealingLR(
optimizer, self._config["num_epochs"]
)
return [optimizer], [scheduler]
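    # Config keys consumed above: "lr", "weight_decay", "sgd_momentum", "sgd_dampening",
    # "num_epochs", plus the optional "lr_head"/"weight_decay_head" pair for a separate
    # head learning rate, "optimizer": "adam" and "scheduler": "steplr".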
def optimizer_zero_grad(self, epoch, batch_idx, optimizer, optimizer_idx):
# set_to_none=True is a modest speed-up
optimizer.zero_grad(set_to_none=True)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
if self._config["freeze_layers"]:
self.model.eval()
else:
self.model.train()
sparse_input = ME.SparseTensor(batch["sinput_F"].float(), coordinates=batch["sinput_C"].int())
# sparse_input = SparseTensor(batch["sinput_F"], batch["sinput_C"])
output_points = self.model(sparse_input)
# print(output_points.shape, batch["labels"].shape, "=================================")
loss = self.criterion(output_points, batch["labels"])
# if self.mode == 'source_free':
# empty the cache to reduce the memory requirement: ME is known to slowly
# filling the cache otherwise
torch.cuda.empty_cache()
self.log(
"train_loss", loss, on_step=True, on_epoch=True, prog_bar=True, logger=True
)
self.train_losses.append(loss.detach().cpu())
return loss
def training_epoch_end(self, outputs):
self.epoch += 1
if self.epoch == self.num_epochs:
self.save()
def validation_step(self, batch, batch_idx):
# sparse_input = SparseTensor(batch["sinput_F"], batch["sinput_C"])
sparse_input = ME.SparseTensor(batch["sinput_F"].float(), coordinates=batch["sinput_C"].int())
output_points = self.model(sparse_input)
loss = self.criterion(output_points, batch["labels"])
self.val_losses.append(loss.detach().cpu())
self.log(
"val_loss", loss, on_epoch=True, prog_bar=True, logger=True, sync_dist=True
)
# Ensure we ignore the index 0
# (probably not necessary after some training)
output_points = output_points.softmax(1)
if self.ignore_index is not None:
output_points[:, self.ignore_index] = 0.0
preds = []
labels = []
offset = 0
output_points = output_points.argmax(1)
for i, lb in enumerate(batch["len_batch"]):
preds.append(output_points[batch["inverse_indexes"][i] + offset])
labels.append(batch["evaluation_labels"][i])
offset += lb
preds = torch.cat(preds, dim=0).int()
labels = torch.cat(labels, dim=0).int()
c_matrix = confusion_matrix(preds, labels, self.n_classes)
return loss, c_matrix
def validation_epoch_end(self, outputs):
c_matrix = sum([o[1] for o in outputs])
# remove the ignore_index from the confusion matrix
c_matrix = torch.sum(self.all_gather(c_matrix), 0)
m_IoU, fw_IoU, per_class_IoU = compute_IoU_from_cmatrix(
c_matrix, self.ignore_index
)
self.train_losses = []
self.val_losses = []
self.log("m_IoU", m_IoU, prog_bar=True, logger=True, sync_dist=False)
self.log("fw_IoU", fw_IoU, prog_bar=True, logger=True, sync_dist=False)
if self.epoch == self._config["num_epochs"]:
self.save()
@rank_zero_only
def save(self):
path = os.path.join(self.working_dir, "model.pt")
torch.save(
{"model_points": self.model.state_dict(), "config": self._config}, path
)
| 8,081 | 40.446154 | 148 | py |
CLIP2Scene | CLIP2Scene-main/downstream/dataloader_nuscenes.py | import os
import torch
import numpy as np
from torch.utils.data import Dataset
from nuscenes.nuscenes import NuScenes
import MinkowskiEngine as ME
# from MinkowskiEngine.utils import sparse_quantize
from utils.transforms import make_transforms_clouds
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import LidarPointCloud
# from torchsparse.utils.quantize import sparse_quantize
# from petrel_client.client import Client
import json
# parametrizing set, to try out different parameters
CUSTOM_SPLIT = [
"scene-0008", "scene-0009", "scene-0019", "scene-0029", "scene-0032", "scene-0042",
"scene-0045", "scene-0049", "scene-0052", "scene-0054", "scene-0056", "scene-0066",
"scene-0067", "scene-0073", "scene-0131", "scene-0152", "scene-0166", "scene-0168",
"scene-0183", "scene-0190", "scene-0194", "scene-0208", "scene-0210", "scene-0211",
"scene-0241", "scene-0243", "scene-0248", "scene-0259", "scene-0260", "scene-0261",
"scene-0287", "scene-0292", "scene-0297", "scene-0305", "scene-0306", "scene-0350",
"scene-0352", "scene-0358", "scene-0361", "scene-0365", "scene-0368", "scene-0377",
"scene-0388", "scene-0391", "scene-0395", "scene-0413", "scene-0427", "scene-0428",
"scene-0438", "scene-0444", "scene-0452", "scene-0453", "scene-0459", "scene-0463",
"scene-0464", "scene-0475", "scene-0513", "scene-0533", "scene-0544", "scene-0575",
"scene-0587", "scene-0589", "scene-0642", "scene-0652", "scene-0658", "scene-0669",
"scene-0678", "scene-0687", "scene-0701", "scene-0703", "scene-0706", "scene-0710",
"scene-0715", "scene-0726", "scene-0735", "scene-0740", "scene-0758", "scene-0786",
"scene-0790", "scene-0804", "scene-0806", "scene-0847", "scene-0856", "scene-0868",
"scene-0882", "scene-0897", "scene-0899", "scene-0976", "scene-0996", "scene-1012",
"scene-1015", "scene-1016", "scene-1018", "scene-1020", "scene-1024", "scene-1044",
"scene-1058", "scene-1094", "scene-1098", "scene-1107",
]
def custom_collate_fn(list_data):
"""
Custom collate function adapted for creating batches with MinkowskiEngine.
"""
input = list(zip(*list_data))
# whether the dataset returns labels
labelized = len(input) == 7
# evaluation_labels are per points, labels are per voxels
if labelized:
xyz, coords, feats, labels, evaluation_labels, inverse_indexes, lidar_name = input
else:
xyz, coords, feats, inverse_indexes = input
# for names
# name_list = []
# print(feats[0].size())
coords_batch, len_batch = [], []
# create a tensor of coordinates of the 3D points
# note that in ME, batche index and point indexes are collated in the same dimension
for batch_id, coo in enumerate(coords):
N = coords[batch_id].shape[0]
coords_batch.append(
torch.cat((coo, torch.ones(N, 1, dtype=torch.int32) * batch_id), 1)
)
len_batch.append(N)
# for batch_id, coo in enumerate(coords):
# N = coords[batch_id].shape[0]
# coords_batch.append(
# torch.cat((torch.ones(N, 1, dtype=torch.int32) * batch_id, coo), 1)
# )
# len_batch.append(N)
# Collate all lists on their first dimension
coords_batch = torch.cat(coords_batch, 0).int()
feats_batch = torch.cat(feats, 0).float()
if labelized:
labels_batch = torch.cat(labels, 0).long()
return {
"pc": xyz, # point cloud
"sinput_C": coords_batch, # discrete coordinates (ME)
"sinput_F": feats_batch, # point features (N, 3)
"len_batch": len_batch, # length of each batch
"labels": labels_batch, # labels for each (voxelized) point
"evaluation_labels": evaluation_labels, # labels for each point
"inverse_indexes": inverse_indexes, # labels for each point
"lidar_name": lidar_name
}
else:
return {
"pc": xyz,
"sinput_C": coords_batch,
"sinput_F": feats_batch,
"len_batch": len_batch,
"inverse_indexes": inverse_indexes,
}
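# In the batch dictionary above, "sinput_C" appends the batch index as an extra
# coordinate column for sparse collation, while "len_batch" and "inverse_indexes"
# allow the evaluation code to split the flat prediction tensor back into per-scan,
# per-point predictions.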
class NuScenesDataset(Dataset):
"""
Dataset returning a lidar scene and associated labels.
"""
def __init__(self, phase, config, transforms=None, cached_nuscenes=None):
self.phase = phase
self.labels = self.phase != "test"
self.transforms = transforms
self.voxel_size = config["voxel_size"]
self.cylinder = config["cylindrical_coordinates"]
if phase != "test":
if cached_nuscenes is not None:
self.nusc = cached_nuscenes
else:
self.nusc = NuScenes(
version="v1.0-trainval", dataroot="s3://liuyouquan/nuScenes/", verbose=False
)
else:
self.nusc = NuScenes(
version="v1.0-test", dataroot="s3://liuyouquan/nuScenes/", verbose=False
)
self.list_tokens = []
# a skip ratio can be used to reduce the dataset size
# and accelerate experiments
if phase in ("val", "verifying"):
skip_ratio = 1
else:
try:
skip_ratio = config["dataset_skip_step"]
except KeyError:
skip_ratio = 1
self.dataroot = "s3://liuyouquan/nuScenes" #todo
# self.client = Client('~/.petreloss.conf')
# if phase in ("train", "val", "test"):
# phase_scenes = create_splits_scenes()[phase]
# elif phase == "parametrizing":
# phase_scenes = list(
# set(create_splits_scenes()["train"]) - set(CUSTOM_SPLIT)
# )
# elif phase == "verifying":
# phase_scenes = CUSTOM_SPLIT
if phase == "train":
with open('./list_keyframes_train.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_train.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
if phase == "val":
with open('./list_keyframes_val.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_val.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
if phase == "test":
with open('./list_keyframes_test.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_test.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
if phase == "parametrizing":
with open('./list_keyframes_parametrizing.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_parametrizing.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
elif phase == "verifying":
with open('./list_keyframes_verifying.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_verifying.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
print("before: ", len(self.list_keyframes))
self.list_keyframes = self.list_keyframes[::skip_ratio]
print("after: ", len(self.list_keyframes))
# skip_counter = 0
# create a list of all keyframe scenes
# for scene_idx in range(len(self.nusc.scene)):
# scene = self.nusc.scene[scene_idx]
# if scene["name"] in phase_scenes:
# skip_counter += 1
# if skip_counter % skip_ratio == 0:
# self.create_list_of_tokens(scene)
# labels' names lookup table
self.eval_labels = {
0: 0, 1: 0, 2: 7, 3: 7, 4: 7, 5: 0, 6: 7, 7: 0, 8: 0, 9: 1, 10: 0, 11: 0,
12: 8, 13: 0, 14: 2, 15: 3, 16: 3, 17: 4, 18: 5, 19: 0, 20: 0, 21: 6, 22: 9,
23: 10, 24: 11, 25: 12, 26: 13, 27: 14, 28: 15, 29: 0, 30: 16, 31: 0,
}
# def create_list_of_tokens(self, scene):
# # Get first in the scene
# current_sample_token = scene["first_sample_token"]
#
# # Loop to get all successive keyframes
# while current_sample_token != "":
# current_sample = self.nusc.get("sample", current_sample_token)
# next_sample_token = current_sample["next"]
# self.list_tokens.append(current_sample["data"]["LIDAR_TOP"])
# current_sample_token = next_sample_token
def __len__(self):
return len(self.list_keyframes)
def __getitem__(self, idx):
lidar_token = self.list_keyframes[idx]
key_ = lidar_token["LIDAR_TOP"]
pcl_path = self.dataroot + self.frames_corrs_info[key_]["lidar_name"].replace("samples", "")
# pc_original = LidarPointCloud.from_file(pcl_path)
# pc_ref = pc_original.points
# pointsensor = self.nusc.get("sample_data", lidar_token)
# pcl_path = os.path.join(self.nusc.dataroot, pointsensor["filename"])
points = LidarPointCloud.from_file(pcl_path).points.T
# get the points (4th coordinate is the point intensity)
pc = points[:, :3]
if self.labels:
# lidarseg_labels_filename = os.path.join(
# self.nusc.dataroot, self.nusc.get("lidarseg", lidar_token)["filename"]
# )
lidarseg_labels_filename = self.dataroot + "/" + self.frames_corrs_info[key_]["labels_name"]
points_labels = np.fromfile(lidarseg_labels_filename, dtype=np.uint8)
# points_labels = np.frombuffer(self.client.get(lidarseg_labels_filename, update_cache=True), dtype=np.uint8)
pc = torch.tensor(pc)
# apply the transforms (augmentation)
if self.transforms:
pc = self.transforms(pc)
if self.cylinder:
# Transform to cylinder coordinate and scale for given voxel size
x, y, z = pc.T
rho = torch.sqrt(x ** 2 + y ** 2) / self.voxel_size
# corresponds to a split each 1°
phi = torch.atan2(y, x) * 180 / np.pi
z = z / self.voxel_size
coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
else:
coords_aug = pc / self.voxel_size
# Voxelization for spvcnn
# discrete_coords, indexes, inverse_indexes = sparse_quantize(
# coords_aug.numpy(), return_index=True, return_inverse=True
# )
# discrete_coords, indexes, inverse_indexes = torch.from_numpy(discrete_coords), torch.from_numpy(indexes), torch.from_numpy(inverse_indexes)
        discrete_coords, indexes, inverse_indexes = ME.utils.sparse_quantize(
            coords_aug.contiguous(), return_index=True, return_inverse=True
        )
# use those voxels features
unique_feats = torch.tensor(points[indexes][:, 3:])
# print(((unique_feats) != 0).sum() / unique_feats.shape[0])
if self.labels:
points_labels = torch.tensor(
np.vectorize(self.eval_labels.__getitem__)(points_labels),
dtype=torch.int32,
)
unique_labels = points_labels[indexes]
lidar_name = self.frames_corrs_info[key_]["labels_name"]
if self.labels:
return (
pc,
discrete_coords,
unique_feats,
unique_labels,
points_labels,
inverse_indexes,
lidar_name,
)
else:
return pc, discrete_coords, unique_feats, inverse_indexes
def make_data_loader(config, phase, num_threads=0):
"""
Create the data loader for a given phase and a number of threads.
This function is not used with pytorch lightning, but is used when evaluating.
"""
# select the desired transformations
if phase == "train":
transforms = make_transforms_clouds(config)
else:
transforms = None
# instantiate the dataset
dset = NuScenesDataset(phase=phase, transforms=transforms, config=config)
collate_fn = custom_collate_fn
batch_size = config["batch_size"] // config["num_gpus"]
# create the loader
loader = torch.utils.data.DataLoader(
dset,
batch_size=batch_size,
shuffle=phase == "train",
num_workers=num_threads,
collate_fn=collate_fn,
pin_memory=False,
drop_last=phase == "train",
worker_init_fn=lambda id: np.random.seed(torch.initial_seed() // 2 ** 32 + id),
)
return loader
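# --- Usage sketch (illustrative only, not part of the original file) ---
# The config keys below are the ones read by NuScenesDataset / make_data_loader;
# it assumes the nuScenes data and the key-frame JSON files above are available.
def _example_val_loader():
    config = {
        "voxel_size": 0.1,
        "cylindrical_coordinates": True,
        "batch_size": 8,
        "num_gpus": 1,
    }
    loader = make_data_loader(config, phase="val", num_threads=4)
    for batch in loader:
        # "sinput_F"/"sinput_C" feed the sparse 3D backbone, while
        # "inverse_indexes" maps voxel-level predictions back to the full
        # point cloud of each scan in the batch.
        return batch["sinput_F"].shape, batch["sinput_C"].shape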
| 12,825 | 37.866667 | 149 | py |
CLIP2Scene | CLIP2Scene-main/downstream/model_builder.py | import torch
from model import MinkUNet, SPVCNN
def load_state_with_same_shape(model, weights):
"""
Load common weights in two similar models
(for instance between a pretraining and a downstream training)
"""
model_state = model.state_dict()
if list(weights.keys())[0].startswith("model."):
weights = {k.partition("model.")[2]: weights[k] for k in weights.keys()}
if list(weights.keys())[0].startswith("model_points."):
weights = {k.partition("model_points.")[2]: weights[k] for k in weights.keys()}
if list(weights.keys())[0].startswith("module."):
print("Loading multigpu weights with module. prefix...")
weights = {k.partition("module.")[2]: weights[k] for k in weights.keys()}
if list(weights.keys())[0].startswith("encoder."):
print("Loading multigpu weights with encoder. prefix...")
weights = {k.partition("encoder.")[2]: weights[k] for k in weights.keys()}
filtered_weights = {
k: v
for k, v in weights.items()
if (k in model_state and v.size() == model_state[k].size())
}
removed_weights = {
k: v
for k, v in weights.items()
if not (k in model_state and v.size() == model_state[k].size())
}
print("Loading weights:" + ", ".join(filtered_weights.keys()))
print("")
print("Not loading weights:" + ", ".join(removed_weights.keys()))
return filtered_weights
def make_model(config, load_path=None):
"""
Build the points model according to what is in the config
"""
assert not config[
"normalize_features"
], "You shouldn't normalize features for the downstream task"
# model = MinkUNet(1, config["model_n_out"], config)
# model = SPVCNN(1, config["model_n_out"], config)
model = MinkUNet(3, config["model_n_out"], config)
if load_path:
print("Training with pretrained model")
checkpoint = torch.load(load_path, map_location="cpu")
if "config" in checkpoint:
for cfg in ("voxel_size", "cylindrical_coordinates"):
assert checkpoint["config"][cfg] == config[cfg], (
f"{cfg} is not consistant. "
f"Checkpoint: {checkpoint['config'][cfg]}, "
f"Config: {config[cfg]}."
)
if set(checkpoint.keys()) == set(["epoch", "model", "optimizer", "train_criterion"]):
print("Pre-trained weights are coming from DepthContrast.")
pretraining_epochs = checkpoint["epoch"]
print(f"==> Number of pre-training epochs {pretraining_epochs}")
checkpoint = checkpoint["model"]
if list(checkpoint.keys())[0].startswith("module."):
print("Loading multigpu weights with module. prefix...")
checkpoint = {k.partition("module.")[2]: checkpoint[k] for k in checkpoint.keys()}
voxel_net_suffix = "trunk.2."
checkpoint = {
key.partition(voxel_net_suffix)[2]: checkpoint[key]
for key in checkpoint.keys() if key.startswith(voxel_net_suffix)
}
print(f"==> Number of loaded weight blobs {len(checkpoint)}")
checkpoint = {"model_points": checkpoint}
key = "model_points" if "model_points" in checkpoint else "state_dict"
filtered_weights = load_state_with_same_shape(model, checkpoint[key])
model_dict = model.state_dict()
model_dict.update(filtered_weights)
model.load_state_dict(model_dict)
if config["freeze_layers"]:
for param in list(model.parameters())[:-2]:
param.requires_grad = False
return model
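# --- Usage sketch (illustrative only, not part of the original file) ---
# The keys below are the ones make_model reads directly; MinkUNet itself may
# read further keys from the same config, so this dict is only indicative and
# the checkpoint path is a placeholder.
def _example_build_model():
    config = {
        "normalize_features": False,
        "model_n_out": 17,  # number of output classes of the downstream task
        "freeze_layers": False,
        "voxel_size": 0.1,  # checked against the checkpoint config if present
        "cylindrical_coordinates": True,
    }
    return make_model(config, load_path=None)  # or a path to pre-trained weights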
| 3,677 | 41.275862 | 98 | py |
CLIP2Scene | CLIP2Scene-main/downstream/criterion.py | """
Lovasz-Softmax and Jaccard hinge loss in PyTorch
Maxim Berman 2018 ESAT-PSI KU Leuven (MIT License)
https://github.com/edwardzhou130/PolarSeg/blob/master/network/lovasz_losses.py
"""
from __future__ import print_function, division
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
# import evaluate
from .evaluate import CLASSES_NUSCENES
from .evaluate import CLASSES_KITTI
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
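# Worked example (added for illustration): with the errors already sorted in
# decreasing order and gt_sorted = [1, 1, 0, 1] (3 foreground points),
#   intersection = 3 - cumsum([1, 1, 0, 1]) = [2, 1, 1, 0]
#   union        = 3 + cumsum([0, 0, 1, 0]) = [3, 3, 4, 4]
#   jaccard      = 1 - intersection / union = [1/3, 2/3, 3/4, 1]
# and after the difference step the returned gradient is
#   [1/3, 1/3, 1/12, 1/4],
# i.e. the marginal increase of the Jaccard loss contributed by each error.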
def iou_binary(preds, labels, EMPTY=1.0, ignore=None, per_image=True):
"""
IoU for foreground class
binary: 1 foreground, 0 background
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
intersection = ((label == 1) & (pred == 1)).sum()
union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
if not union:
iou = EMPTY
else:
iou = float(intersection) / float(union)
ious.append(iou)
    iou = mean(ious)  # mean across images if per_image
return 100 * iou
def iou(preds, labels, C, EMPTY=1.0, ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
# The ignored label is sometimes among predicted classes
if i != ignore:
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | ((pred == i) & (label != ignore))).sum()
if not union:
iou.append(EMPTY)
else:
iou.append(float(intersection) / float(union))
ious.append(iou)
    # mean across images if per_image
ious = [mean(iou) for iou in zip(*ious)]
return 100 * np.array(ious)
# --------------------------- BINARY LOSSES ---------------------------
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(
lovasz_hinge_flat(
*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)
)
for log, lab in zip(logits, labels)
)
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * Variable(signs)
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
class StableBCELoss(torch.nn.modules.Module):
def __init__(self):
super(StableBCELoss, self).__init__()
def forward(self, input, target):
neg_abs = -input.abs()
loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()
return loss.mean()
def binary_xloss(logits, labels, ignore=None):
"""
Binary Cross entropy loss
logits: [B, H, W] Variable, logits at each pixel
(between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
ignore: void class id
"""
logits, labels = flatten_binary_scores(logits, labels, ignore)
loss = StableBCELoss()(logits, Variable(labels.float()))
return loss
# --------------------------- MULTICLASS LOSSES ---------------------------
class DownstreamLoss(nn.Module):
"""
    Custom loss which is the sum of a Lovasz loss and a cross-entropy loss.
Main class to instantiate in the code.
"""
def __init__(self, weights=None, ignore_index=None, device="cpu"):
super(DownstreamLoss, self).__init__()
self.ignore_index = ignore_index
if weights is None:
self.crossentropy = torch.nn.CrossEntropyLoss()
else:
self.crossentropy = torch.nn.CrossEntropyLoss(
weight=torch.tensor(weights).float().to(device)
)
def forward(self, probas, labels):
if self.ignore_index is not None:
valid = labels != self.ignore_index
probas = probas[valid]
labels = labels[valid]
loss1 = self.crossentropy(probas, labels)
loss2 = lovasz_softmax_flat(probas.softmax(-1), labels)
return loss1 + loss2
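# --- Usage sketch (illustrative only, not part of the original file) ---
# probas are raw per-point logits of shape (N, C) and labels are integer class
# ids of shape (N,); class 0 is used as the ignored class here as an example.
def _example_downstream_loss():
    criterion = DownstreamLoss(weights=None, ignore_index=0, device="cpu")
    logits = torch.randn(1024, 17)
    labels = torch.randint(0, 17, (1024,))
    return criterion(logits, labels)  # cross-entropy + Lovasz-Softmax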
class unknown_aware_infoNCE(nn.Module):
"""
    Unknown-aware loss: cross-entropy on the points of seen classes plus a
    pseudo-supervised term that penalizes confident seen-class predictions on
    points whose label belongs to the unseen classes.
"""
def __init__(self, ignore_index=None, config=None):
super(unknown_aware_infoNCE, self).__init__()
self.ignore_index = ignore_index
# self.seen_classes =
self.unseen_classes = ['motorcycle', 'trailer', 'terrain', 'traffic_cone']
self.CLASS_LABELS = CLASSES_NUSCENES
if config['dataset'] == 'kitti':
self.CLASS_LABELS = CLASSES_KITTI
self.seen_class_index = list(range(len(self.CLASS_LABELS)))
for item in self.unseen_classes:
index = self.CLASS_LABELS.index(item)
# self.unseen_index.append(index)
self.seen_class_index.remove(index)
self.crossentropy = torch.nn.CrossEntropyLoss()
def pseudo_supervised(self, predictions):
if predictions.size()[0] == 0: return 0
predictions = torch.softmax(predictions, dim=1)
loss = torch.mean(torch.sum(predictions[:, self.seen_class_index], dim=1))
# loss += torch.mean(1 - torch.sum(predictions[:, self.unseen_index], dim=1))
return loss
def forward(self, probas, labels):
for item in self.unseen_classes:
index = self.CLASS_LABELS.index(item)
labels[labels == index] = -200
seen_index = ((labels != self.ignore_index) & (labels != -200))
unseen_index = labels == -200
        # leftover debugging breakpoint disabled (it would halt the forward pass)
        # import pdb
        # pdb.set_trace()
loss1 = self.crossentropy(probas[seen_index], labels[seen_index])
loss2 = self.pseudo_supervised(probas[unseen_index])
return loss1 + loss2
def lovasz_softmax(probas, labels, classes="present", per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction
(between 0 and 1).
Interpreted as binary (sigmoid) output with outputs of
size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a
list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
if per_image:
loss = mean(
lovasz_softmax_flat(
*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
classes=classes
)
for prob, lab in zip(probas, labels)
)
else:
loss = lovasz_softmax_flat(
*flatten_probas(probas, labels, ignore), classes=classes
)
return loss
def lovasz_softmax_flat(probas, labels, classes="present"):
"""
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels,
or a list of classes to average.
"""
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.0
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
if classes == "present" and fg.sum() == 0:
continue
if C == 1:
if len(classes) > 1:
raise ValueError("Sigmoid output possible only with 1 class")
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (Variable(fg) - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
return mean(losses)
def flatten_probas(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
elif probas.dim() == 5:
# 3D segmentation
B, C, L, H, W = probas.size()
probas = probas.contiguous().view(B, C, L, H * W)
B, C, H, W = probas.size()
# B * H * W, C = P, C
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = labels != ignore
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels
def xloss(logits, labels, ignore=None):
"""
Cross entropy loss
"""
return F.cross_entropy(logits, Variable(labels), ignore_index=255)
def jaccard_loss(probas, labels, ignore=None, smooth=100, bk_class=None):
"""
Something wrong with this loss
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction.
Interpreted as binary (sigmoid) output with outputs of
size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or
a list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
vprobas, vlabels = flatten_probas(probas, labels, ignore)
true_1_hot = torch.eye(vprobas.shape[1])[vlabels]
if bk_class:
one_hot_assignment = torch.ones_like(vlabels)
one_hot_assignment[vlabels == bk_class] = 0
one_hot_assignment = one_hot_assignment.float().unsqueeze(1)
true_1_hot = true_1_hot * one_hot_assignment
true_1_hot = true_1_hot.to(vprobas.device)
intersection = torch.sum(vprobas * true_1_hot)
cardinality = torch.sum(vprobas + true_1_hot)
loss = (intersection + smooth / (cardinality - intersection + smooth)).mean()
return (1 - loss) * smooth
def hinge_jaccard_loss(
probas, labels, ignore=None, classes="present", hinge=0.1, smooth=100
):
"""
Multi-class Hinge Jaccard loss
probas: [B, C, H, W] Variable, class probabilities at each prediction.
Interpreted as binary (sigmoid) output with outputs of
size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels,
or a list of classes to average.
ignore: void class labels
"""
vprobas, vlabels = flatten_probas(probas, labels, ignore)
C = vprobas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ["all", "present"] else classes
for c in class_to_sum:
if c in vlabels:
c_sample_ind = vlabels == c
cprobas = vprobas[c_sample_ind, :]
non_c_ind = np.array([a for a in class_to_sum if a != c])
class_pred = cprobas[:, c]
max_non_class_pred = torch.max(cprobas[:, non_c_ind], dim=1)[0]
TP = (
torch.sum(torch.clamp(class_pred - max_non_class_pred, max=hinge) + 1.0)
+ smooth
)
FN = torch.sum(
torch.clamp(max_non_class_pred - class_pred, min=-hinge) + hinge
)
if (~c_sample_ind).sum() == 0:
FP = 0
else:
nonc_probas = vprobas[~c_sample_ind, :]
class_pred = nonc_probas[:, c]
max_non_class_pred = torch.max(nonc_probas[:, non_c_ind], dim=1)[0]
FP = torch.sum(
torch.clamp(class_pred - max_non_class_pred, max=hinge) + 1.0
)
losses.append(1 - TP / (TP + FP + FN))
if len(losses) == 0:
return 0
return mean(losses)
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
return x != x
def mean(ls, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
ls = iter(ls)
if ignore_nan:
ls = ifilterfalse(isnan, ls)
try:
n = 1
acc = next(ls)
except StopIteration:
if empty == "raise":
raise ValueError("Empty mean")
return empty
for n, v in enumerate(ls, 2):
acc += v
if n == 1:
return acc
return acc / n
| 14,503 | 32.496536 | 88 | py |
CLIP2Scene | CLIP2Scene-main/downstream/dataloader_kitti.py | import os
import re
import torch
import numpy as np
from torch.utils.data import Dataset
# from MinkowskiEngine.utils import sparse_quantize
import MinkowskiEngine as ME  # needed for ME.utils.sparse_quantize below
from utils.transforms import make_transforms_clouds
# from torchsparse import SparseTensor
# from torchsparse.utils.collate import sparse_collate_fn
# from torchsparse.utils.quantize import sparse_quantize
TRAIN_SET = {0, 1, 2, 3, 4, 5, 6, 7, 9, 10}
VALIDATION_SET = {8}
TEST_SET = {11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
def custom_collate_fn(list_data):
"""
Collate function adapted for creating batches with MinkowskiEngine.
"""
input = list(zip(*list_data))
labelized = len(input) == 6
if labelized:
xyz, coords, feats, labels, evaluation_labels, inverse_indexes = input
else:
xyz, coords, feats, inverse_indexes = input
coords_batch, len_batch = [], []
for batch_id, coo in enumerate(coords):
N = coords[batch_id].shape[0]
coords_batch.append(
torch.cat((coo, torch.ones(N, 1, dtype=torch.int32) * batch_id), 1)
)
len_batch.append(N)
# for batch_id, coo in enumerate(coords):
# N = coords[batch_id].shape[0]
# coords_batch.append(
# torch.cat((torch.ones(N, 1, dtype=torch.int32) * batch_id, coo), 1)
# )
# len_batch.append(N)
# coords_batch_sparse = []
# Concatenate all lists
coords_batch = torch.cat(coords_batch, 0).int()
feats_batch = torch.cat(feats, 0).float()
if labelized:
labels_batch = torch.cat(labels, 0).long()
return {
"pc": xyz, # point cloud
"sinput_C": coords_batch, # discrete coordinates (ME)
"sinput_F": feats_batch, # point features (N, 3)
"len_batch": len_batch, # length of each batch
"labels": labels_batch, # labels for each (voxelized) point
"evaluation_labels": evaluation_labels, # labels for each point
"inverse_indexes": inverse_indexes, # labels for each point
}
else:
return {
"pc": xyz,
"sinput_C": coords_batch,
"sinput_F": feats_batch,
"len_batch": len_batch,
"inverse_indexes": inverse_indexes,
}
class SemanticKITTIDataset(Dataset):
"""
Dataset returning a lidar scene and associated labels.
    Note that the superpixels functionality has been removed.
"""
def __init__(self, phase, config, transforms=None):
self.phase = phase
self.labels = self.phase != "test"
self.transforms = transforms
self.voxel_size = config["voxel_size"]
self.cylinder = config["cylindrical_coordinates"]
# a skip ratio can be used to reduce the dataset size
# and accelerate experiments
if phase == "train":
try:
skip_ratio = config["dataset_skip_step"]
except KeyError:
skip_ratio = 1
else:
skip_ratio = 1
if phase in ("train", "parametrizing"):
phase_set = TRAIN_SET
elif phase in ("val", "verifying"):
phase_set = VALIDATION_SET
elif phase == "test":
phase_set = TEST_SET
self.list_files = []
for num in phase_set:
directory = next(
os.walk(
f"/mnt/lustre/share_data/liuyouquan/semantickitti/sequences/{num:0>2d}/velodyne"
)
)
self.list_files.extend(
map(
lambda x: f"/mnt/lustre/share_data/liuyouquan/semantickitti/sequences/"
f"{num:0>2d}/velodyne/" + x,
directory[2],
)
)
self.list_files = sorted(self.list_files)[::skip_ratio]
# labels' names lookup table
self.eval_labels = {
0: 0, 1: 0, 10: 1, 11: 2, 13: 5, 15: 3, 16: 5, 18: 4, 20: 5, 30: 6, 31: 7,
32: 8, 40: 9, 44: 10, 48: 11, 49: 12, 50: 13, 51: 14, 52: 0, 60: 9, 70: 15,
71: 16, 72: 17, 80: 18, 81: 19, 99: 0, 252: 1, 253: 7, 254: 6, 255: 8,
256: 5, 257: 5, 258: 4, 259: 5,
}
def __len__(self):
return len(self.list_files)
def __getitem__(self, idx):
lidar_file = self.list_files[idx]
points = np.fromfile(lidar_file, dtype=np.float32).reshape((-1, 4))
# get the points (4th coordinate is the point intensity)
pc = points[:, :3]
if self.labels:
lidarseg_labels_filename = re.sub(
"bin", "label", re.sub("velodyne", "labels", lidar_file)
)
points_labels = (
np.fromfile(lidarseg_labels_filename, dtype=np.uint32) & 0xFFFF
)
pc = torch.tensor(pc)
# apply the transforms (augmentation)
if self.transforms:
pc = self.transforms(pc)
if self.cylinder:
# Transform to cylinder coordinate and scale for voxel size
x, y, z = pc.T
rho = torch.sqrt(x ** 2 + y ** 2) / self.voxel_size
# corresponds to a split each 1°
phi = torch.atan2(y, x) * 180 / np.pi
z = z / self.voxel_size
coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
else:
coords_aug = pc / self.voxel_size
# Voxelization
# discrete_coords, indexes, inverse_indexes = sparse_quantize(
# coords_aug, return_index=True, return_inverse=True
# )
# discrete_coords, indexes, inverse_indexes = sparse_quantize(coords_aug.numpy(),
# return_index=True,
# return_inverse=True)
        discrete_coords, indexes, inverse_indexes = ME.utils.sparse_quantize(
            coords_aug.contiguous(), return_index=True, return_inverse=True
        )
        # ME.utils.sparse_quantize already returns torch tensors for a tensor
        # input, so no numpy -> torch conversion is needed here.
# unique_feats = torch.tensor(points[indexes][:, 3:])
unique_feats = torch.tensor(points[indexes][:, 3:] + 1.)
# print(((unique_feats - 1) != 0).sum() / unique_feats.shape[0] )
if self.labels:
points_labels = torch.tensor(
np.vectorize(self.eval_labels.__getitem__)(points_labels),
dtype=torch.int32,
)
unique_labels = points_labels[indexes]
if self.labels:
return (
pc,
discrete_coords,
unique_feats,
unique_labels,
points_labels,
inverse_indexes,
)
else:
return pc, discrete_coords, unique_feats, inverse_indexes
def make_data_loader(config, phase, num_threads=0):
"""
Create the data loader for a given phase and a number of threads.
"""
# select the desired transformations
if phase == "train":
transforms = make_transforms_clouds(config)
else:
transforms = None
# instantiate the dataset
dset = SemanticKITTIDataset(phase=phase, transforms=transforms, config=config)
collate_fn = custom_collate_fn
batch_size = config["batch_size"] // config["num_gpus"]
# create the loader
loader = torch.utils.data.DataLoader(
dset,
batch_size=batch_size,
# shuffle=False if sampler else True,
shuffle=phase == "train",
num_workers=num_threads,
collate_fn=collate_fn,
pin_memory=False,
# sampler=sampler,
drop_last=phase == "train",
worker_init_fn=lambda id: np.random.seed(torch.initial_seed() // 2 ** 32 + id),
)
    return loader
| 7,816 | 33.436123 | 147 | py |
CLIP2Scene | CLIP2Scene-main/utils/savemodel.py | import torch
import os
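# NOTE: `model_state_to_cpu` is used below but neither defined nor imported in
# this file; the minimal implementation here is an assumption about what it is
# meant to do (copy every tensor of a state dict to CPU), not original code.
def model_state_to_cpu(model_state):
    cpu_state = type(model_state)()  # keep the (Ordered)dict type
    for key, val in model_state.items():
        cpu_state[key] = val.cpu()
    return cpu_state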
def save_checkpoint(self):
trained_epoch = self.cur_epoch + 1
ckpt_name = self.ckpt_dir / ('checkpoint_epoch_%d' % trained_epoch)
checkpoint_state = {}
checkpoint_state['epoch'] = trained_epoch
checkpoint_state['it'] = self.it
if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
model_state = model_state_to_cpu(self.model.module.state_dict())
else:
model_state = model_state_to_cpu(self.model.state_dict())
checkpoint_state['model_state'] = model_state
checkpoint_state['optimizer_state'] = self.optimizer.state_dict()
checkpoint_state['scaler'] = self.scaler.state_dict()
checkpoint_state['lr_scheduler_state'] = self.lr_scheduler.state_dict()
torch.save(checkpoint_state, f"{ckpt_name}.pth")
def resume(self, filename):
if not os.path.isfile(filename):
raise FileNotFoundError
self.logger.info(f"==> Loading parameters from checkpoint {filename}")
checkpoint = torch.load(filename, map_location='cpu')
# self.cur_epoch = checkpoint['epoch']
# self.start_epoch = checkpoint['epoch']
# self.it = checkpoint['it']
self.model.load_params(checkpoint['model_state'], strict=True)
# self.optimizer.load_state_dict(checkpoint['optimizer_state'])
# self.scaler.load_state_dict(checkpoint['scaler'])
# self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler_state'])
self.logger.info('==> Done')
    return
| 1,481 | 41.342857 | 76 | py |
CLIP2Scene | CLIP2Scene-main/utils/chamfer_distance.py | import torch
import torch.nn as nn
def compute_chamfer_distance(p1, p2):
'''
Calculate Chamfer Distance between two point sets
:param p1: size[bn, N, D]
:param p2: size[bn, M, D]
    :return: per-point squared distances to the nearest neighbour in the other
             set: dist_min1 of size [bn, N] and dist_min2 of size [bn, M]
'''
diff = p1[:, :, None, :] - p2[:, None, :, :]
dist = torch.sum(diff*diff, dim=3)
dist1 = dist
dist2 = torch.transpose(dist, 1, 2)
dist_min1, _ = torch.min(dist1, dim=2)
dist_min2, _ = torch.min(dist2, dim=2)
return dist_min1, dist_min2
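# --- Usage sketch (illustrative only, not part of the original file) ---
# Two batches of 3D point sets with different numbers of points: the returned
# tensors hold, for every point, the squared distance to its nearest neighbour
# in the other set, with shapes (bn, N) and (bn, M).
def _example_chamfer_distance():
    p1 = torch.rand(2, 128, 3)
    p2 = torch.rand(2, 256, 3)
    dist_min1, dist_min2 = compute_chamfer_distance(p1, p2)
    return dist_min1.shape, dist_min2.shape  # (2, 128), (2, 256)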
class ComputeCDLoss(nn.Module):
def __init__(self):
super(ComputeCDLoss, self).__init__()
def forward(self, recon_points, gt_points):
dist1, dist2 = compute_chamfer_distance(recon_points, gt_points)
loss = (torch.sum(dist1) + torch.sum(dist2)) / (recon_points.shape[0] + 1E-6)
# print(loss)
        return loss
| 934 | 25.714286 | 85 | py |
CLIP2Scene | CLIP2Scene-main/utils/prompt_engineering.py | import numpy as np
import torch
import clip
import argparse
scannet_classes = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'other furniture']
nuscenes_classes = ["barrier", "bicycle", "bus", "car", "construction vehicle", "motorcycle", "pedestrian", "traffic_cone", "trailer", "truck", "driveable surface", "other_flat", "sidewalk", "terrain", "manmade", "vegetation"]
kitti_classes = [ "car", "bicycle", "motorcycle", "truck", "other vehicle", "person", "bicyclist", "motorcyclist", "road", "parking", "sidewalk", "other ground", "building", "fence", "vegetation", "trunk", "terrain", "pole", "traffic sign"]
cityscapes_classes = ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"]
ade20k_classes = ['wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag']
coco_stuff_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner', 'blanket', 'branch', 'bridge', 'building', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling', 'tile ceiling', 'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain', 'desk', 'dirt', 'door', 'fence', 'marble floor', 'floor', 'stone floor', 'tile floor', 'wood floor', 'flower', 'fog', 'food', 'fruit', 'furniture', 'grass', 'gravel', 'ground', 'hill', 'house', 'leaves', 'light', 'mat', 'metal', 'mirror', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement', 'pillow', 'plant', 'plastic', 'platform', 'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof', 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky', 'skyscraper', 'snow', 'solid', 'stairs', 'stone', 'straw', 'structural', 'table', 'tent', 'textile', 'towel', 'tree', 'vegetable', 'brick wall', 'concrete wall', 'wall', 'panel wall', 'stone wall', 'tile wall', 'wood wall', 'water', 'waterdrops', 'blind window', 'window', 'wood']
voc_classes = ['airplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'dining table', 'dog', 'horse', 'motorbike', 'person', 'potted plant', 'sheep', 'sofa', 'train', 'tv monitor']
pascal_context_classes = ['airplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower', 'food', 'grass', 'ground', 'horse', 'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform', 'potted plant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train', 'tree', 'truck', 'tv monitor', 'wall', 'water', 'window', 'wood']
all_pascal_context_classes = ['accordion', 'airplane', 'air conditioner', 'antenna', 'artillery', 'ashtray', 'atrium', 'baby carriage', 'bag', 'ball', 'balloon', 'bamboo weaving', 'barrel', 'baseball bat', 'basket', 'basketball backboard', 'bathtub', 'bed', 'bedclothes', 'beer', 'bell', 'bench', 'bicycle', 'binoculars', 'bird', 'bird cage', 'bird feeder', 'bird nest', 'blackboard', 'board', 'boat', 'bone', 'book', 'bottle', 'bottle opener', 'bowl', 'box', 'bracelet', 'brick', 'bridge', 'broom', 'brush', 'bucket', 'building', 'bus', 'cabinet', 'cabinet door', 'cage', 'cake', 'calculator', 'calendar', 'camel', 'camera', 'camera lens', 'can', 'candle', 'candle holder', 'cap', 'car', 'card', 'cart', 'case', 'casette recorder', 'cash register', 'cat', 'cd', 'cd player', 'ceiling', 'cell phone', 'cello', 'chain', 'chair', 'chessboard', 'chicken', 'chopstick', 'clip', 'clippers', 'clock', 'closet', 'cloth', 'clothes tree', 'coffee', 'coffee machine', 'comb', 'computer', 'concrete', 'cone', 'container', 'control booth', 'controller', 'cooker', 'copying machine', 'coral', 'cork', 'corkscrew', 'counter', 'court', 'cow', 'crabstick', 'crane', 'crate', 'cross', 'crutch', 'cup', 'curtain', 'cushion', 'cutting board', 'dais', 'disc', 'disc case', 'dishwasher', 'dock', 'dog', 'dolphin', 'door', 'drainer', 'dray', 'drink dispenser', 'drinking machine', 'drop', 'drug', 'drum', 'drum kit', 'duck', 'dumbbell', 'earphone', 'earrings', 'egg', 'electric fan', 'electric iron', 'electric pot', 'electric saw', 'electronic keyboard', 'engine', 'envelope', 'equipment', 'escalator', 'exhibition booth', 'extinguisher', 'eyeglass', 'fan', 'faucet', 'fax machine', 'fence', 'ferris wheel', 'fire extinguisher', 'fire hydrant', 'fire place', 'fish', 'fish tank', 'fishbowl', 'fishing net', 'fishing pole', 'flag', 'flagstaff', 'flame', 'flashlight', 'floor', 'flower', 'fly', 'foam', 'food', 'footbridge', 'forceps', 'fork', 'forklift', 'fountain', 'fox', 'frame', 'fridge', 'frog', 'fruit', 'funnel', 'furnace', 'game controller', 'game machine', 'gas cylinder', 'gas hood', 'gas stove', 'gift box', 'glass', 'glass marble', 'globe', 'glove', 'goal', 'grandstand', 'grass', 'gravestone', 'ground', 'guardrail', 'guitar', 'gun', 'hammer', 'hand cart', 'handle', 'handrail', 'hanger', 'hard disk drive', 'hat', 'hay', 'headphone', 'heater', 'helicopter', 'helmet', 'holder', 'hook', 'horse', 'horse-drawn carriage', 'hot-air balloon', 'hydrovalve', 'ice', 'inflator pump', 'ipod', 'iron', 'ironing board', 'jar', 'kart', 'kettle', 'key', 'keyboard', 'kitchen range', 'kite', 'knife', 'knife block', 'ladder', 'ladder truck', 'ladle', 'laptop', 'leaves', 'lid', 'life buoy', 'light', 'light bulb', 'lighter', 'line', 'lion', 'lobster', 'lock', 'machine', 'mailbox', 'mannequin', 'map', 'mask', 'mat', 'match book', 'mattress', 'menu', 'metal', 'meter box', 'microphone', 'microwave', 'mirror', 'missile', 'model', 'money', 'monkey', 'mop', 'motorbike', 'mountain', 'mouse', 'mouse pad', 'musical instrument', 'napkin', 'net', 'newspaper', 'oar', 'ornament', 'outlet', 'oven', 'oxygen bottle', 'pack', 'pan', 'paper', 'paper box', 'paper cutter', 'parachute', 'parasol', 'parterre', 'patio', 'pelage', 'pen', 'pen container', 'pencil', 'person', 'photo', 'piano', 'picture', 'pig', 'pillar', 'pillow', 'pipe', 'pitcher', 'plant', 'plastic', 'plate', 'platform', 'player', 'playground', 'pliers', 'plume', 'poker', 'poker chip', 'pole', 'pool table', 'postcard', 'poster', 'pot', 'potted plant', 'printer', 'projector', 'pumpkin', 'rabbit', 'racket', 'radiator', 
'radio', 'rail', 'rake', 'ramp', 'range hood', 'receiver', 'recorder', 'recreational machines', 'remote control', 'road', 'robot', 'rock', 'rocket', 'rocking horse', 'rope', 'rug', 'ruler', 'runway', 'saddle', 'sand', 'saw', 'scale', 'scanner', 'scissors', 'scoop', 'screen', 'screwdriver', 'sculpture', 'scythe', 'sewer', 'sewing machine', 'shed', 'sheep', 'shell', 'shelves', 'shoe', 'shopping cart', 'shovel', 'sidecar', 'sidewalk', 'sign', 'signal light', 'sink', 'skateboard', 'ski', 'sky', 'sled', 'slippers', 'smoke', 'snail', 'snake', 'snow', 'snowmobiles', 'sofa', 'spanner', 'spatula', 'speaker', 'speed bump', 'spice container', 'spoon', 'sprayer', 'squirrel', 'stage', 'stair', 'stapler', 'stick', 'sticky note', 'stone', 'stool', 'stove', 'straw', 'stretcher', 'sun', 'sunglass', 'sunshade', 'surveillance camera', 'swan', 'sweeper', 'swim ring', 'swimming pool', 'swing', 'switch', 'table', 'tableware', 'tank', 'tap', 'tape', 'tarp', 'telephone', 'telephone booth', 'tent', 'tire', 'toaster', 'toilet', 'tong', 'tool', 'toothbrush', 'towel', 'toy', 'toy car', 'track', 'train', 'trampoline', 'trash bin', 'tray', 'tree', 'tricycle', 'tripod', 'trophy', 'truck', 'tube', 'turtle', 'tv monitor', 'tweezers', 'typewriter', 'umbrella', 'unknown', 'vacuum cleaner', 'vending machine', 'video camera', 'video game console', 'video player', 'video tape', 'violin', 'wakeboard', 'wall', 'wallet', 'wardrobe', 'washing machine', 'watch', 'water', 'water dispenser', 'water pipe', 'water skate board', 'watermelon', 'whale', 'wharf', 'wheel', 'wheelchair', 'window', 'window blinds', 'wineglass', 'wire', 'wood', 'wool']
bg_classes = ['building', 'ground', 'grass', 'tree', 'sky']
mickey_classes = ['Mickey Mouse', 'Donald Duck'] + bg_classes
batman_classes = ['Batman', 'Joker'] + bg_classes
mario_classes = ['Mario', 'Luigi'] + bg_classes
gates_classes = ['Bill Gates', 'Steve Jobs'] + bg_classes
cityscapes_no_person_classes = ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"]
batman_ext_classes = ['Batman', 'Joker', 'James Gordon', 'The Penguin', 'Robin', 'Alfred Pennyworth', 'Catwoman', 'Harley Quinn'] + cityscapes_no_person_classes
sports_classes = ['baseball player', 'basketball player', 'soccer player', 'football player', 'person', 'background', 'wall', 'building', 'sky', 'grass', 'tree', 'ground', 'floor', 'baseball court', 'basketball court', 'soccer court', 'football court']
car_brands_classes = ['Bugatti', 'Cadillac', 'Porsche', 'Lamborghini', 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', 'background']
blur_classes = ['very blurry car', 'car', 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
car_color_classes = ['white car', 'blue car', 'red car', 'black car', 'green car', 'yellow car', 'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
prompt_templates = [
'a bad photo of a {}.', 'a photo of many {}.', 'a sculpture of a {}.', 'a photo of the hard to see {}.', 'a low resolution photo of the {}.', 'a rendering of a {}.', 'graffiti of a {}.', 'a bad photo of the {}.', 'a cropped photo of the {}.', 'a tattoo of a {}.', 'the embroidered {}.', 'a photo of a hard to see {}.', 'a bright photo of a {}.', 'a photo of a clean {}.', 'a photo of a dirty {}.', 'a dark photo of the {}.', 'a drawing of a {}.', 'a photo of my {}.', 'the plastic {}.', 'a photo of the cool {}.', 'a close-up photo of a {}.', 'a black and white photo of the {}.', 'a painting of the {}.', 'a painting of a {}.', 'a pixelated photo of the {}.', 'a sculpture of the {}.', 'a bright photo of the {}.', 'a cropped photo of a {}.', 'a plastic {}.', 'a photo of the dirty {}.', 'a jpeg corrupted photo of a {}.', 'a blurry photo of the {}.', 'a photo of the {}.', 'a good photo of the {}.', 'a rendering of the {}.', 'a {} in a video game.', 'a photo of one {}.', 'a doodle of a {}.', 'a close-up photo of the {}.', 'a photo of a {}.', 'the origami {}.', 'the {} in a video game.', 'a sketch of a {}.', 'a doodle of the {}.', 'a origami {}.', 'a low resolution photo of a {}.', 'the toy {}.', 'a rendition of the {}.', 'a photo of the clean {}.', 'a photo of a large {}.', 'a rendition of a {}.', 'a photo of a nice {}.', 'a photo of a weird {}.', 'a blurry photo of a {}.', 'a cartoon {}.', 'art of a {}.', 'a sketch of the {}.', 'a embroidered {}.', 'a pixelated photo of a {}.', 'itap of the {}.', 'a jpeg corrupted photo of the {}.', 'a good photo of a {}.', 'a plushie {}.', 'a photo of the nice {}.', 'a photo of the small {}.', 'a photo of the weird {}.', 'the cartoon {}.', 'art of the {}.', 'a drawing of the {}.', 'a photo of the large {}.', 'a black and white photo of a {}.', 'the plushie {}.', 'a dark photo of a {}.', 'itap of a {}.', 'graffiti of the {}.', 'a toy {}.', 'itap of my {}.', 'a photo of a cool {}.', 'a photo of a small {}.', 'a tattoo of the {}.', 'there is a {} in the scene.', 'there is the {} in the scene.', 'this is a {} in the scene.', 'this is the {} in the scene.', 'this is one {} in the scene.',
]
def parse_args():
parser = argparse.ArgumentParser(description='Prompt engeering script')
parser.add_argument('--model', default='RN50', choices=['RN50', 'RN101', 'RN50x4', 'RN50x16', 'ViT32', 'ViT16'], help='clip model name')
parser.add_argument('--class-set', default=['voc'], nargs='+',
choices=['kitti', 'nuscenes', 'scannet', 'city', 'ade', 'stuff', 'voc', 'context', 'acontext', 'mickey', 'batman', 'mario', 'gates', 'blur', 'sports', 'car_brands', 'batman_ext', 'car_color'],
help='the set of class names')
parser.add_argument('--no-prompt-eng', action='store_true', help='disable prompt engineering')
args = parser.parse_args()
return args
def zeroshot_classifier(model_name, classnames, templates):
model, preprocess = clip.load(model_name)
with torch.no_grad():
zeroshot_weights = []
for classname in classnames:
texts = [template.format(classname) for template in templates] #format with class
texts = clip.tokenize(texts).cuda() #tokenize
class_embeddings = model.encode_text(texts) #embed with text encoder
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).cuda()
return zeroshot_weights
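# --- Usage sketch (illustrative only, not part of the original file) ---
# Builds CLIP text embeddings for the nuScenes class names with the full prompt
# template set and saves them in the same layout as the __main__ block below;
# the output filename is a placeholder. Requires a GPU because
# zeroshot_classifier moves the tokenized prompts to CUDA.
def _example_nuscenes_text_embeddings():
    weights = zeroshot_classifier('ViT-B/16', nuscenes_classes, prompt_templates)
    weights = weights.permute(1, 0).float()  # (num_classes, embedding_dim)
    torch.save(weights, 'nuscenes_ViT16_clip_text.pth')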
if __name__ == '__main__':
args = parse_args()
classes = []
all_set_name = ''
name_mapping = {'kitti': kitti_classes, 'nuscenes': nuscenes_classes, 'scannet': scannet_classes, 'city': cityscapes_classes, 'ade': ade20k_classes, 'stuff': coco_stuff_classes, 'voc': voc_classes, 'context': pascal_context_classes, 'acontext': all_pascal_context_classes, 'mickey': mickey_classes, 'batman': batman_classes, 'mario': mario_classes, 'gates': gates_classes, 'blur': blur_classes, 'sports': sports_classes, 'car_brands': car_brands_classes, 'batman_ext': batman_ext_classes, 'car_color': car_color_classes}
for set_name in args.class_set:
if set_name in name_mapping:
classes += name_mapping[set_name]
all_set_name += '_{}'.format(set_name)
if set_name in ['blur'] or args.no_prompt_eng:
prompt_templates = ['a photo of a {}.']
# remove redundant classes
classes = list(dict.fromkeys(classes))
# remove the first underline
all_set_name = all_set_name[1:]
print(classes)
print(f"{len(classes)} class(es), {len(prompt_templates)} template(s)")
# ['RN50', 'RN101', 'RN50x4', 'RN50x16', 'ViT-B/32', 'ViT-B/16']
name_mapping = {'RN50': 'RN50', 'RN101': 'RN101', 'RN50x4': 'RN50x4', 'RN50x16': 'RN50x16', 'ViT32': 'ViT-B/32', 'ViT16': 'ViT-B/16'}
zeroshot_weights = zeroshot_classifier(name_mapping[args.model], classes, prompt_templates)
zeroshot_weights = zeroshot_weights.permute(1, 0).float()
print(zeroshot_weights.shape)
prefix = f'{all_set_name}_{args.model}'
if args.no_prompt_eng:
prefix += '_npe'
torch.save(zeroshot_weights, f'{prefix}_clip_text.pth')
| 17,422 | 171.50495 | 5,180 | py |
CLIP2Scene | CLIP2Scene-main/utils/metrics.py | import torch
def confusion_matrix(preds, labels, num_classes):
hist = (
torch.bincount(
num_classes * labels + preds,
minlength=num_classes ** 2,
)
.reshape(num_classes, num_classes)
.float()
)
return hist
def compute_IoU_from_cmatrix(hist, ignore_index=None):
"""Computes the Intersection over Union (IoU).
Args:
hist: confusion matrix.
Returns:
m_IoU, fw_IoU, and matrix IoU
"""
if ignore_index is not None:
hist[ignore_index] = 0.0
intersection = torch.diag(hist)
union = hist.sum(dim=1) + hist.sum(dim=0) - intersection
IoU = intersection.float() / union.float()
IoU[union == 0] = 1.0
if ignore_index is not None:
IoU = torch.cat((IoU[:ignore_index], IoU[ignore_index+1:]))
m_IoU = torch.mean(IoU).item()
fw_IoU = (
torch.sum(intersection) / (2 * torch.sum(hist) - torch.sum(intersection))
).item()
return m_IoU, fw_IoU, IoU
def compute_IoU(preds, labels, num_classes, ignore_index=None):
"""Computes the Intersection over Union (IoU)."""
hist = confusion_matrix(preds, labels, num_classes)
return compute_IoU_from_cmatrix(hist, ignore_index)
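# --- Usage sketch (illustrative only, not part of the original file) ---
# preds and labels are flat integer tensors of per-point class ids; class 0 is
# treated as the ignored class in this example, so the per-class IoU vector has
# num_classes - 1 entries.
def _example_compute_iou():
    num_classes = 17
    preds = torch.randint(0, num_classes, (10000,))
    labels = torch.randint(0, num_classes, (10000,))
    return compute_IoU(preds, labels, num_classes, ignore_index=0)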
| 1,229 | 28.285714 | 81 | py |
CLIP2Scene | CLIP2Scene-main/utils/pc_utils.py | """ Utility functions for processing point clouds.
Author: Charles R. Qi, Hao Su
Date: November 2016
"""
import os
import sys
import warnings
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Draw point cloud
from eulerangles import euler2mat
import math
# Point cloud IO
import numpy as np
from plyfile import PlyData, PlyElement
import torch
import random
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.0, flatten=True):
""" Input is BxNx3 batch of point cloud
Output is Bx(vsize^3)
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume(np.squeeze(point_clouds[b,:,:]), vsize, radius)
if flatten:
vol_list.append(vol.flatten())
else:
vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
if flatten:
return np.vstack(vol_list)
else:
return np.concatenate(vol_list, 0)
def point_cloud_to_volume(points, vsize, radius=1.0):
""" input is Nx3 points.
output is vsize*vsize*vsize
assumes points are in range [-radius, radius]
"""
vol = np.zeros((vsize,vsize,vsize))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
vol[locations[:,0],locations[:,1],locations[:,2]] = 1.0
return vol
#a = np.zeros((16,1024,3))
#print point_cloud_to_volume_batch(a, 12, 1.0, False).shape
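# --- Usage sketch (illustrative only, not part of the original file) ---
# Points must lie strictly inside [-radius, radius] on every axis, otherwise
# the integer voxel index computed above can fall outside the grid.
def _example_voxelize_roundtrip():
    pts = np.random.uniform(-0.95, 0.95, (2048, 3))
    vol = point_cloud_to_volume(pts, vsize=32, radius=1.0)  # (32, 32, 32) occupancy grid
    occupied = volume_to_point_cloud(vol)  # integer indices of occupied voxels
    return vol.shape, occupied.shape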
def volume_to_point_cloud(vol):
""" vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
return Nx3 numpy array.
"""
vsize = vol.shape[0]
    assert(vol.shape[1] == vsize and vol.shape[2] == vsize)
points = []
for a in range(vsize):
for b in range(vsize):
for c in range(vsize):
if vol[a,b,c] == 1:
points.append(np.array([a,b,c]))
if len(points) == 0:
return np.zeros((0,3))
points = np.vstack(points)
return points
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxVxVxVxnum_samplex3
Added on Feb 19
"""
vol_list = []
for b in range(point_clouds.shape[0]):
vol = point_cloud_to_volume_v2(point_clouds[b,:,:], vsize, radius, num_sample)
vol_list.append(np.expand_dims(vol, 0))
return np.concatenate(vol_list, 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is vsize*vsize*vsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each voxel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
vol = np.zeros((vsize,vsize,vsize,num_sample,3))
voxel = 2*radius/float(vsize)
locations = (points + radius)/voxel
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
#print loc2pc
for i in range(vsize):
for j in range(vsize):
for k in range(vsize):
if (i,j,k) not in loc2pc:
vol[i,j,k,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j,k)] # a list of (3,) arrays
pc = np.vstack(pc) # kx3
# Sample/pad to num_sample points
if pc.shape[0]>num_sample:
choices = np.random.choice(pc.shape[0], num_sample, replace=False)
pc = pc[choices,:]
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
# Normalize
pc_center = (np.array([i,j,k])+0.5)*voxel - radius
#print 'pc center: ', pc_center
pc = (pc - pc_center) / voxel # shift and scale
vol[i,j,k,:,:] = pc
#print (i,j,k), vol[i,j,k,:,:]
return vol
def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):
""" Input is BxNx3 a batch of point cloud
Output is BxIxIxnum_samplex3
Added on Feb 19
"""
img_list = []
for b in range(point_clouds.shape[0]):
img = point_cloud_to_image(point_clouds[b,:,:], imgsize, radius, num_sample)
img_list.append(np.expand_dims(img, 0))
return np.concatenate(img_list, 0)
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
""" input is Nx3 points
output is imgsize*imgsize*num_sample*3
assumes points are in range [-radius, radius]
samples num_sample points in each pixel, if there are less than
num_sample points, replicate the points
Added on Feb 19
"""
img = np.zeros((imgsize, imgsize, num_sample, 3))
pixel = 2*radius/float(imgsize)
locations = (points[:,0:2] + radius)/pixel # Nx2
locations = locations.astype(int)
loc2pc = {}
for n in range(points.shape[0]):
loc = tuple(locations[n,:])
if loc not in loc2pc:
loc2pc[loc] = []
loc2pc[loc].append(points[n,:])
for i in range(imgsize):
for j in range(imgsize):
if (i,j) not in loc2pc:
img[i,j,:,:] = np.zeros((num_sample,3))
else:
pc = loc2pc[(i,j)]
pc = np.vstack(pc)
if pc.shape[0]>num_sample:
choices = np.random.choice(pc.shape[0], num_sample, replace=False)
pc = pc[choices,:]
elif pc.shape[0]<num_sample:
pc = np.lib.pad(pc, ((0,num_sample-pc.shape[0]),(0,0)), 'edge')
pc_center = (np.array([i,j])+0.5)*pixel - radius
pc[:,0:2] = (pc[:,0:2] - pc_center)/pixel
img[i,j,:,:] = pc
return img
def surface_normal_area(face, vertex):
normals = list()
areas = list()
vertex_to_face = [[] for i in range(len(vertex))]
for fid, f in enumerate(face):
f = f[0]
va, vb, vc = f[0], f[1], f[2]
vertex_to_face[va].append(fid)
vertex_to_face[vb].append(fid)
vertex_to_face[vc].append(fid)
a = vertex[vb] - vertex[va]
b = vertex[vc] - vertex[va]
normal = np.cross(a, b)
area = np.dot(normal, normal) / 2.0
normalized_normal = normal / np.linalg.norm(normal)
normals.append(normalized_normal)
areas.append(area)
return np.array(normals), np.array(areas), vertex_to_face
def vertex_normal(vertex_to_face, normal, areas):
vertex_normals = list()
num_vertex = len(vertex_to_face)
for vid in range(num_vertex):
adj_faces = vertex_to_face[vid]
        if len(adj_faces)==0: # single point with no adjacent faces
vertex_normals.append([0,0,1])
continue
adj_faces_area = np.expand_dims(np.array(areas[adj_faces]), axis=-1)
adj_faces_normal = np.array(normal[adj_faces])
avg_normal = (adj_faces_normal * adj_faces_area) / np.sum(adj_faces_area)
avg_normal = np.sum(avg_normal, axis=0)
normalized_normal = avg_normal / np.linalg.norm(avg_normal)
#if np.isclose(np.linalg.norm(avg_normal), 0.0):
# print('-------------------')
# print(len(adj_faces))
# print('-------------------')
# print('-------------------')
# print(adj_faces_area.shape, adj_faces_normal.shape, adj_faces_area, adj_faces_normal)
# print(adj_faces_normal * adj_faces_area)
# print(np.sum(adj_faces_area))
# print((adj_faces_normal * adj_faces_area) / np.sum(adj_faces_area))
# print(avg_normal, np.linalg.norm(avg_normal), adj_faces_area, adj_faces_normal)
# print('-------------------')
vertex_normals.append(normalized_normal)
return np.array(vertex_normals)
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
""" read XYZ point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z] for x,y,z in pc])
return pc_array
def read_ply_rgba(filename):
""" read XYZRGBA point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z,r,g,b,a] for x,y,z,r,g,b,a in pc])
return pc_array
def read_ply_rgba_normal(filename):
""" read XYZRGBA and NxNyNz point cloud from filename PLY file """
plydata = PlyData.read(filename)
pc = plydata['vertex'].data
pc_array = np.array([[x, y, z,r,g,b,a] for x,y,z,r,g,b,a in pc])
face = plydata['face'].data
f_n, f_a, v_f = surface_normal_area(face, pc_array[:, 0:3])
v_n = vertex_normal(v_f, f_n, f_a)
pc_array = np.concatenate((pc_array, v_n), axis=-1)
return pc_array
def write_ply(points, filename, text=True):
""" input: Nx3, write points to filename as PLY format. """
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def write_ply_rgb(points, colors, filename, text=True):
""" input: Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
colors = [(colors[i,0], colors[i,1], colors[i,2]) for i in range(colors.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'),('blue', 'u1')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
def write_ply_rgb_normal(points, colors, normals, filename, text=True):
""" input: Nx3, Nx3, Nx3 write points and colors to filename as PLY format. """
num_points = len(points)
assert len(colors) == num_points
points = [(points[i,0], points[i,1], points[i,2]) for i in range(points.shape[0])]
colors = [(colors[i,0], colors[i,1], colors[i,2]) for i in range(colors.shape[0])]
normals = [(normals[i,0], normals[i,1], normals[i,2]) for i in range(normals.shape[0])]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'),('z', 'f4')])
color = np.array(colors, dtype=[('red', 'u1'), ('green', 'u1'),('blue', 'u1')])
normal = np.array(normals, dtype=[('nx', 'f4'), ('ny', 'f4'),('nz', 'f4')])
vertex_all = np.empty(num_points, vertex.dtype.descr + color.dtype.descr + normal.dtype.descr)
for prop in vertex.dtype.names:
vertex_all[prop] = vertex[prop]
for prop in color.dtype.names:
vertex_all[prop] = color[prop]
for prop in normal.dtype.names:
vertex_all[prop] = normal[prop]
el = PlyElement.describe(vertex_all, 'vertex', comments=['vertices'])
PlyData([el], text=text).write(filename)
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,
xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True):
""" Render point cloud to image with alpha channel.
Input:
points: Nx3 numpy array (+y is up direction)
Output:
gray image as numpy array of size canvasSizexcanvasSize
"""
image = np.zeros((canvasSize, canvasSize))
if input_points is None or input_points.shape[0] == 0:
return image
points = input_points[:, switch_xyz]
M = euler2mat(zrot, yrot, xrot)
points = (np.dot(M, points.transpose())).transpose()
# Normalize the point cloud
# We normalize scale to fit points in a unit sphere
if normalize:
centroid = np.mean(points, axis=0)
points -= centroid
furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1)))
points /= furthest_distance
# Pre-compute the Gaussian disk
radius = (diameter-1)/2.0
disk = np.zeros((diameter, diameter))
for i in range(diameter):
for j in range(diameter):
if (i - radius) * (i-radius) + (j-radius) * (j-radius) <= radius * radius:
disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2))
mask = np.argwhere(disk > 0)
dx = mask[:, 0]
dy = mask[:, 1]
dv = disk[disk > 0]
# Order points by z-buffer
zorder = np.argsort(points[:, 2])
points = points[zorder, :]
points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2])))
max_depth = np.max(points[:, 2])
for i in range(points.shape[0]):
j = points.shape[0] - i - 1
x = points[j, 0]
y = points[j, 1]
xc = canvasSize/2 + (x*space)
yc = canvasSize/2 + (y*space)
xc = int(np.round(xc))
yc = int(np.round(yc))
px = dx + xc
py = dy + yc
image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3
image = image / np.max(image)
return image
def point_cloud_three_views(points):
""" input points Nx3 numpy array (+y is up direction).
        return a numpy array gray image of size 500x1500. """
# +y is up direction
# xrot is azimuth
# yrot is in-plane
# zrot is elevation
img1 = draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi)
img2 = draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi)
img3 = draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi)
image_large = np.concatenate([img1, img2, img3], 1)
return image_large
def point_cloud_three_views_demo():
""" Demo for draw_point_cloud function """
from PIL import Image
points = read_ply('../third_party/mesh_sampling/piano.ply')
im_array = point_cloud_three_views(points)
img = Image.fromarray(np.uint8(im_array*255.0))
img.save('piano.jpg')
if __name__=="__main__":
point_cloud_three_views_demo()
def pyplot_draw_point_cloud(points, output_filename):
""" points is a Nx3 numpy array """
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0], points[:,1], points[:,2])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
#savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
""" vol is of size vsize*vsize*vsize
output an image to output_filename
"""
points = volume_to_point_cloud(vol)
pyplot_draw_point_cloud(points, output_filename)
def write_ply_color(points, labels, out_filename, num_classes=None, colors=None):
""" Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file """
import matplotlib.pyplot as pyplot
labels = labels.astype(int)
N = points.shape[0]
if num_classes is None:
num_classes = np.max(labels)+1
print(num_classes)
else:
assert(num_classes>np.max(labels))
if colors is None:
#colors = [pyplot.cm.hsv(i/float(num_classes)) for i in range(num_classes)]
colors = [pyplot.cm.jet(i/float(num_classes)) for i in range(num_classes)]
    fout = open(out_filename, 'w')
    for i in range(N):
        c = colors[labels[i]]
        # matplotlib colormaps return RGBA floats in [0, 1]; convert to 0-255 ints
        c = [int(x * 255) for x in c[:3]]
        fout.write('v %f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))
    fout.close()
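# Example (sketch): coloring a random cloud by integer labels with the helper
# above; label count and output name are illustrative assumptions.
def _demo_write_ply_color(out_filename='demo_labels.obj', n=2048, num_classes=4):
    pts = np.random.rand(n, 3)
    labels = np.random.randint(0, num_classes, size=n)
    write_ply_color(pts, labels, out_filename, num_classes=num_classes)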
def farthest_pts_sampling_abuse(pts, num_samples):
'''
naive method
:param pts: n x 3 ndarray
:param num_samples:
:return: num_samples x 3 ndarray
'''
diff = pts[:, None, :] - pts[None, :, :]
# dis_mat = np.sum(diff * diff, axis=2)
dis_mat = np.linalg.norm(diff, axis=2)
N = num_samples
perm = np.zeros(N, dtype=np.int64)
lambdas = np.zeros(N)
ds = dis_mat[0, :]
for i in range(1, N):
idx = np.argmax(ds)
perm[i] = idx
lambdas[i] = ds[idx]
ds = np.minimum(ds, dis_mat[idx, :])
return pts[perm, :]
def farthest_pts_sampling(coords, num_samples):
'''
    greedy farthest point sampling
    :param coords: n x 3 torch tensor
    :param num_samples:
    :return: num_samples x 3 ndarray
    '''
    pts = coords.numpy()
    perm = np.zeros(num_samples, dtype=np.int64)
    perm[0] = random.randint(0, pts.shape[0] - 1)
    # distance from every point to its closest already-selected point
    min_dist = np.linalg.norm(pts - pts[perm[0]][None, :], axis=1)
    for i in range(1, num_samples):
        # pick the point farthest from the current selection, then update distances
        perm[i] = np.argmax(min_dist)
        diff = np.linalg.norm(pts - pts[perm[i]][None, :], axis=1)
        min_dist = np.minimum(min_dist, diff)
    point_set = pts[perm, :]
    return point_set
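# Example (sketch): subsampling a random cloud with the two samplers above.
# numpy, torch and random are assumed to be imported at the top of this module
# (they are already used by the surrounding helpers); sizes are illustrative.
def _demo_farthest_sampling(n=4096, k=256):
    pts = np.random.rand(n, 3)
    sampled_np = farthest_pts_sampling_abuse(pts, k)
    sampled_torch = farthest_pts_sampling(torch.from_numpy(pts.copy()), k)
    return sampled_np.shape, sampled_torch.shape  # both (k, 3)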
def random_partition(coords):
# print('1')
mask = torch.ones(coords.size()[0]).numpy()
coords_np = coords.numpy()
sample_num = random.randint(2, 5)
random_index = np.random.randint(coords_np.shape[0], size=sample_num)
sample_points = coords_np[random_index, :]
diff = coords_np[:, None, :] - sample_points[None, :, :]
diff = np.linalg.norm(diff, axis=2)
partitions = np.argmin(diff, axis=1)
filter_ind = random.randint(0, sample_num - 1)
# coords_torch = torch.from_numpy(coords_np[partitions != filter_ind])
coords_torch = coords
mask[partitions == filter_ind] = 0
mask = torch.from_numpy(mask)
# print('4')
# part1 = torch.from_numpy(coords_np[partitions == filter_ind])
# part2 = torch.from_numpy(coords_np[partitions != filter_ind])
return coords_torch, mask
# return part1, part2
def random_rotation(coords):
# scale = torch.eye(3)*random.uniform(0.95, 1.05)
scale_flip = np.eye(3) + np.random.randn(3, 3) * 0.1
scale_flip[0][0] *= np.random.randint(0, 2) * 2 - 1
scale_flip = torch.from_numpy(scale_flip).float()
# scale = torch.eye(3)
theta = random.uniform(0, 2) * math.pi
rotationx = torch.tensor([[math.cos(theta), math.sin(theta), 0],
[-math.sin(theta), math.cos(theta), 0],
[0, 0, 1]]).float()
# rotationy = torch.tensor([[math.cos(theta), 0, math.sin(theta)],
# [0, 1, 0],
# [math.sin(theta), 0, -math.cos(theta)]]).float()
#
# rotationz = torch.tensor([[1, 0, 0],
# [0, math.cos(theta), math.sin(theta)],
# [0, -math.sin(theta), math.cos(theta)]]).float()
m = torch.matmul(scale_flip, rotationx)
coords = torch.matmul(coords.float(), m)
return coords
# def random_rotation(coords):
# return coords
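# Example (sketch): applying the random scale/flip/rotation augmentation to a
# dummy cloud; the point count is an arbitrary assumption.
def _demo_random_rotation(n=1024):
    coords = torch.rand(n, 3)
    return random_rotation(coords).shape  # still (n, 3)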
def resize_rotation(coords, item):
scale = 0
if item == 'chair':
scale = torch.eye(3) * 0.8
elif item == 'sofa':
scale = torch.eye(3) * 1.75
elif item == 'table':
scale = torch.eye(3) * 1.65
elif item == 'bookshelf':
scale = torch.eye(3) * 1.7
elif item == 'desk':
scale = torch.eye(3) * 1.25
elif item == 'bed':
scale = torch.eye(3) * 2.1
elif item == 'sink':
scale = torch.eye(3) * 1.05
elif item == 'bathtub':
scale = torch.eye(3) * 1.25
elif item == 'toilet':
scale = torch.eye(3) * 0.65
elif item == 'door':
scale = torch.eye(3) * 1.8
elif item == 'curtain':
scale = torch.eye(3) * 2
else :
scale = torch.eye(3) * random.uniform(0.9, 1.75)
'''
if item == 'chair':
scale = torch.eye(3) * random.uniform(5, 5.5)
elif item == 'bed':
scale = torch.eye(3) * random.uniform(1.4, 1.6)
elif item == 'sofa':
scale = torch.eye(3) * random.uniform(9, 9.5)
elif item == 'table':
scale = torch.eye(3) * random.uniform(8, 8.5)
elif item == 'bookshelf':
scale = torch.eye(3) * random.uniform(1.1, 1.2)
elif item == 'desk':
scale = torch.eye(3) * random.uniform(7, 7.5)
elif item == 'nega_data':
scale = torch.eye(3) * random.uniform(5, 8)
'''
# theta = 0 * math.pi
# rotationx = torch.tensor([[math.cos(theta), math.sin(theta), 0],
# [-math.sin(theta), math.cos(theta), 0],
# [0, 0, 1]]).float()
#
# rotationy = torch.tensor([[math.cos(theta), 0, math.sin(theta)],
# [0, 1, 0],
# [math.sin(theta), 0, -math.cos(theta)]]).float()
# rotationz = torch.tensor([[1, 0, 0],
# [0, math.cos(theta), math.sin(theta)],
# [0, -math.sin(theta), math.cos(theta)]]).float()
# m = torch.matmul(scale, rotationz)
m = scale
coords = torch.matmul(coords.float(), m)
return coords | 21,735 | 35.469799 | 104 | py |
CLIP2Scene | CLIP2Scene-main/utils/testfiles.py | import os
import copy
import torch
import numpy as np
from PIL import Image
# import MinkowskiEngine as ME
from pyquaternion import Quaternion
from torch.utils.data import Dataset
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.geometry_utils import view_points
from nuscenes.utils.splits import create_splits_scenes
from nuscenes.utils.data_classes import LidarPointCloud
from torchsparse.utils.quantize import sparse_quantize
import json
from petrel_client.client import Client
import cv2
CUSTOM_SPLIT = [
"scene-0008", "scene-0009", "scene-0019", "scene-0029", "scene-0032", "scene-0042",
"scene-0045", "scene-0049", "scene-0052", "scene-0054", "scene-0056", "scene-0066",
"scene-0067", "scene-0073", "scene-0131", "scene-0152", "scene-0166", "scene-0168",
"scene-0183", "scene-0190", "scene-0194", "scene-0208", "scene-0210", "scene-0211",
"scene-0241", "scene-0243", "scene-0248", "scene-0259", "scene-0260", "scene-0261",
"scene-0287", "scene-0292", "scene-0297", "scene-0305", "scene-0306", "scene-0350",
"scene-0352", "scene-0358", "scene-0361", "scene-0365", "scene-0368", "scene-0377",
"scene-0388", "scene-0391", "scene-0395", "scene-0413", "scene-0427", "scene-0428",
"scene-0438", "scene-0444", "scene-0452", "scene-0453", "scene-0459", "scene-0463",
"scene-0464", "scene-0475", "scene-0513", "scene-0533", "scene-0544", "scene-0575",
"scene-0587", "scene-0589", "scene-0642", "scene-0652", "scene-0658", "scene-0669",
"scene-0678", "scene-0687", "scene-0701", "scene-0703", "scene-0706", "scene-0710",
"scene-0715", "scene-0726", "scene-0735", "scene-0740", "scene-0758", "scene-0786",
"scene-0790", "scene-0804", "scene-0806", "scene-0847", "scene-0856", "scene-0868",
"scene-0882", "scene-0897", "scene-0899", "scene-0976", "scene-0996", "scene-1012",
"scene-1015", "scene-1016", "scene-1018", "scene-1020", "scene-1024", "scene-1044",
"scene-1058", "scene-1094", "scene-1098", "scene-1107",
]
def minkunet_collate_pair_fn(list_data):
"""
Collate function adapted for creating batches with MinkowskiEngine.
"""
(
coords,
feats,
images,
pairing_points,
pairing_images,
inverse_indexes,
superpixels,
) = list(zip(*list_data))
batch_n_points, batch_n_pairings = [], []
offset = 0
for batch_id in range(len(coords)):
# Move batchids to the beginning
coords[batch_id][:, -1] = batch_id
pairing_points[batch_id][:] += offset
pairing_images[batch_id][:, 0] += batch_id * images[0].shape[0]
batch_n_points.append(coords[batch_id].shape[0])
batch_n_pairings.append(pairing_points[batch_id].shape[0])
offset += coords[batch_id].shape[0]
# Concatenate all lists
coords_batch = torch.cat(coords, 0).int()
print(coords_batch.size())
pairing_points = torch.tensor(np.concatenate(pairing_points))
pairing_images = torch.tensor(np.concatenate(pairing_images))
feats_batch = torch.cat(feats, 0).float()
images_batch = torch.cat(images, 0).float()
superpixels_batch = torch.tensor(np.concatenate(superpixels))
return {
"sinput_C": coords_batch,
"sinput_F": feats_batch,
"input_I": images_batch,
"pairing_points": pairing_points,
"pairing_images": pairing_images,
"batch_n_pairings": batch_n_pairings,
"inverse_indexes": inverse_indexes,
"superpixels": superpixels_batch,
}
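# Example (sketch): the collate function above is intended to be handed to a
# PyTorch DataLoader together with the dataset defined below, e.g.
#   loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True,
#                                        collate_fn=minkunet_collate_pair_fn)
# The batch size here is an illustrative assumption.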
class NuScenesMatchDataset(Dataset):
"""
Dataset matching a 3D points cloud and an image using projection.
"""
def __init__(
self,
# phase,
# config,
shuffle=False,
cloud_transforms=None,
mixed_transforms=None,
**kwargs,
):
# self.phase = phase
self.shuffle = shuffle
self.cloud_transforms = cloud_transforms
self.mixed_transforms = mixed_transforms
self.cylinder = True
self.voxel_size = 0.1
# self.voxel_size = config["voxel_size"]
# self.cylinder = config["cylindrical_coordinates"]
# self.superpixels_type = config["superpixels_type"]
# self.bilinear_decoder = config["decoder"] == "bilinear"
if "cached_nuscenes" in kwargs:
self.nusc = kwargs["cached_nuscenes"]
else:
self.nusc = NuScenes(
version="v1.0-trainval", dataroot="s3://dataset/nuScenes/", verbose=False
)
# a skip ratio can be used to reduce the dataset size and accelerate experiments
try:
skip_ratio = 1
except KeyError:
skip_ratio = 1
skip_counter = 0
self.dataroot = "s3://liuyouquan/nuScenes" #todo
# self.dataroot = "s3://dataset/nuScenes"
self.client = Client('~/.petreloss.conf')
# print(phase)
# if phase == "train":
# f = open('./list_keyframes_train.json', 'r')
# content = f.read()
# self.list_keyframes = json.loads(content)
#
# f1 = open('./save_dict_train.json', 'r')
# content1 = f1.read()
# self.frames_corrs_info = json.loads(content1)
#
# elif phase == "val":
# f = open('./list_keyframes_val.json', 'r')
# content = f.read()
# self.list_keyframes = json.loads(content)
#
# f1 = open('./save_dict_val.json', 'r')
# content1 = f1.read()
# self.frames_corrs_info = json.loads(content1)
#
# elif phase == "parametrizing":
# with open('./list_keyframes_parametrizing.json', 'r') as f:
# self.list_keyframes = json.load(f)
#
# f1 = open('./save_dict_train.json', 'r')
# content = f1.read()
# self.frames_corrs_info = json.loads(content)
# f1.close()
# # phase_scenes = list(
# # set(create_splits_scenes()["train"]) - set(CUSTOM_SPLIT)
# # )
# elif phase == "verifying":
# phase_scenes = CUSTOM_SPLIT
with open('./list_keyframes_parametrizing.json', 'r') as f:
self.list_keyframes = json.load(f)
f1 = open('./save_dict_train.json', 'r')
content = f1.read()
self.frames_corrs_info = json.loads(content)
f1.close()
# print(data1[key_["LIDAR_TOP"]])
# pcl_path = os.path.join("s3://liuyouquan/nuScenes/", data1[key_["LIDAR_TOP"]][0].replace("samples", ""))
# pcl_path = "s3://liuyouquan/nuScenes/" + data1[key_["LIDAR_TOP"]][0].replace("samples", "")
# f = open('./list_keyframes_parametrizing.json', 'r')
# content = f.read()
# self.list_keyframes = json.loads(content)
#
# f1 = open('./save_dict_parametrizing.json', 'r')
# content1 = f1.read()
# self.frames_corrs_info = json.loads(content1)
# phase_scenes = list(
# print(self.list_keyframes)
# print(type(self.list_keyframes))
# create a list of camera & lidar scans
# for scene_idx in range(len(self.nusc.scene)):
# scene = self.nusc.scene[scene_idx]
# if scene["name"] in phase_scenes:
# skip_counter += 1
# if skip_counter % skip_ratio == 0:
# self.create_list_of_scans(scene)
# def create_list_of_scans(self, scene):
# # Get first and last keyframe in the scene
# current_sample_token = scene["first_sample_token"]
# # Loop to get all successive keyframes
# list_data = []
# while current_sample_token != "":
# current_sample = self.nusc.get("sample", current_sample_token) #TODO
# list_data.append(current_sample["data"])
# current_sample_token = current_sample["next"]
#
# # Add new scans in the list
# self.list_keyframes.extend(list_data)
def map_pointcloud_to_image(self, data, min_dist: float = 1.0):
"""
Given a lidar token and camera sample_data token, load pointcloud and map it to
the image plane. Code adapted from nuscenes-devkit
https://github.com/nutonomy/nuscenes-devkit.
:param min_dist: Distance from the camera below which points are discarded.
"""
# pointsensor = self.nusc.get("sample_data", data["LIDAR_TOP"])
key_ = data["LIDAR_TOP"]
pcl_path = "s3://liuyouquan/nuScenes" + self.frames_corrs_info[key_][0].replace("samples", "")
# print(pcl_path)
# pcl_path = os.path.join("s3://liuyouquan/nuScenes/", self.frames_corrs_info[key_][0].replace("samples",""))
# print(pcl_path)
        # Load the lidar point cloud; keep an untouched reference copy for the return.
        pc_original = LidarPointCloud.from_file(pcl_path)
        pc_ref = pc_original.points
images = []
superpixels = []
pairing_points = np.empty(0, dtype=np.int64)
pairing_images = np.empty((0, 3), dtype=np.int64)
camera_list = [
"CAM_FRONT",
"CAM_FRONT_RIGHT",
"CAM_BACK_RIGHT",
"CAM_BACK",
"CAM_BACK_LEFT",
"CAM_FRONT_LEFT",
]
if self.shuffle:
np.random.shuffle(camera_list)
tot = 0
camera_info = self.frames_corrs_info[key_][1]
for i, camera_name in enumerate(camera_list):
            # Work on a fresh copy of the cloud for each camera projection.
            pc = copy.deepcopy(pc_original)
            # cam = self.nusc.get("sample_data", data[camera_name]) #todo
camera_path = camera_info[camera_name]["camera_name"]
# print(pc_ref.shape)
# import pdb
# pdb.set_trace()
# camera_path = "samples/CAM_FRONT/n008-2018-07-27-12-07-38-0400__CAM_FRONT__1532707811012460.jpg"
try:
img_bytes = self.client.get(self.dataroot + "/" + camera_path, update_cache=True)
assert img_bytes is not None
# print(camera_path)
except Exception as e:
tot += 1
print(camera_path)
continue
# img_bytes = self.client.get("s3://dataset/nuScenes/samples/CAM_FRONT/n015-2018-07-18-11-07-57+0800__CAM_FRONT__1531883530412470.jpg", update_cache=True)
# assert img_bytes is not None
img_mem_view = memoryview(img_bytes)
buffer = np.frombuffer(img_mem_view, np.uint8)
im = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
# cv2.imwrite("ttt.jpg", im)
# im = im.reshape(im_shape)
im = np.array(im)
# import pdb
# pdb.set_trace()
# print(im.shape)
# print(im.shape)
# sp = Image.open(
# f"superpixels/nuscenes/"
# f"superpixels_{self.superpixels_type}/{camera_info[camera_name]['token']}.png"
# )
# superpixels.append(np.array(sp))
# Points live in the point sensor frame. So they need to be transformed via
# global to the image plane.
# First step: transform the pointcloud to the ego vehicle frame for the
# timestamp of the sweep.
# cs_record = self.nusc.get(
# "calibrated_sensor", pointsensor["calibrated_sensor_token"]
# )
cs_record = camera_info[camera_name]["cs_record"]
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix)
pc.translate(np.array(cs_record["translation"]))
# Second step: transform from ego to the global frame.
# poserecord = self.nusc.get("ego_pose", pointsensor["ego_pose_token"])
poserecord = camera_info[camera_name]["poserecord"]
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix)
pc.translate(np.array(poserecord["translation"]))
# Third step: transform from global into the ego vehicle frame for the
# timestamp of the image.
# poserecord = self.nusc.get("ego_pose", cam["ego_pose_token"])
poserecord = camera_info[camera_name]["poserecord_"]
pc.translate(-np.array(poserecord["translation"]))
pc.rotate(Quaternion(poserecord["rotation"]).rotation_matrix.T)
# Fourth step: transform from ego into the camera.
# cs_record = self.nusc.get(
# "calibrated_sensor", cam["calibrated_sensor_token"]
# )
cs_record = camera_info[camera_name]["cs_record_"]
pc.translate(-np.array(cs_record["translation"]))
pc.rotate(Quaternion(cs_record["rotation"]).rotation_matrix.T)
# Fifth step: actually take a "picture" of the point cloud.
# Grab the depths (camera frame z axis points away from the camera).
depths = pc.points[2, :]
# Take the actual picture
# (matrix multiplication with camera-matrix + renormalization).
points = view_points(
pc.points[:3, :],
np.array(cs_record["camera_intrinsic"]),
normalize=True,
)
# Remove points that are either outside or behind the camera.
# Also make sure points are at least 1m in front of the camera to avoid
# seeing the lidar points on the camera
# casing for non-keyframes which are slightly out of sync.
points = points[:2].T
mask = np.ones(depths.shape[0], dtype=bool)
mask = np.logical_and(mask, depths > min_dist)
mask = np.logical_and(mask, points[:, 0] > 0)
mask = np.logical_and(mask, points[:, 0] < im.shape[1] - 1)
mask = np.logical_and(mask, points[:, 1] > 0)
mask = np.logical_and(mask, points[:, 1] < im.shape[0] - 1)
matching_points = np.where(mask)[0]
#
matching_pixels = np.round(
np.flip(points[matching_points], axis=1)
).astype(np.int64)
images.append(im / 255)
pairing_points = np.concatenate((pairing_points, matching_points))
pairing_images = np.concatenate(
(
pairing_images,
np.concatenate(
(
np.ones((matching_pixels.shape[0], 1), dtype=np.int64) * i,
matching_pixels,
),
axis=1,
),
)
)
# return tot
return pc_ref.T, images, pairing_points, pairing_images
def __len__(self):
return len(self.list_keyframes)
def getitem(self, idx):
# tot = self.map_pointcloud_to_image(self.list_keyframes[idx])
# return tot
(
pc,
images,
pairing_points,
pairing_images,
) = self.map_pointcloud_to_image(self.list_keyframes[idx])
# superpixels = torch.tensor(superpixels)
intensity = torch.tensor(pc[:, 3:])
pc = torch.tensor(pc[:, :3])
# print(images)
# import pdb
# pdb.set_trace()
#
images = torch.tensor(np.array(images, dtype=np.float32).transpose(0, 3, 1, 2))
# if self.cloud_transforms:
# pc = self.cloud_transforms(pc)
# if self.mixed_transforms:
# (
# pc,
# intensity,
# images,
# pairing_points,
# pairing_images,
# superpixels,
# ) = self.mixed_transforms(
# pc, intensity, images, pairing_points, pairing_images
# )
if self.cylinder:
# Transform to cylinder coordinate and scale for voxel size
x, y, z = pc.T
rho = torch.sqrt(x ** 2 + y ** 2) / self.voxel_size
phi = torch.atan2(y, x) * 180 / np.pi # corresponds to a split each 1°
z = z / self.voxel_size
coords_aug = torch.cat((rho[:, None], phi[:, None], z[:, None]), 1)
else:
coords_aug = pc / self.voxel_size
#
# # Voxelization with MinkowskiEngine
discrete_coords, indexes, inverse_indexes = sparse_quantize(
coords_aug.contiguous().numpy(), return_index=True, return_inverse=True
)
discrete_coords, indexes, inverse_indexes = torch.from_numpy(discrete_coords), torch.from_numpy(indexes), torch.from_numpy(inverse_indexes)
# # indexes here are the indexes of points kept after the voxelization
pairing_points = inverse_indexes[pairing_points]
#
unique_feats = intensity[indexes]
#
discrete_coords = torch.cat(
(
discrete_coords,
torch.zeros(discrete_coords.shape[0], 1, dtype=torch.int32),
),
1,
)
# return
return (
discrete_coords,
unique_feats,
images,
pairing_points,
pairing_images,
inverse_indexes,
)
dataset = NuScenesMatchDataset()
print("len: ", len(dataset))
sum_t = 0
for i in range(len(dataset)):
# for i in range(100):
    print(i)
    tot = dataset.getitem(i)
    # sum_t += tot
print("sum_t", sum_t) | 17,559 | 37.008658 | 166 | py |
CLIP2Scene | CLIP2Scene-main/utils/convert_clip_weights.py | import torch
import clip
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='Extract and save the CLIP visual weights')
parser.add_argument('--model', default='RN50', choices=['RN50', 'RN101', 'RN50x4', 'RN50x16', 'RN50x64', 'ViT32', 'ViT16', 'ViT14'], help='clip model name')
parser.add_argument('--backbone', action='store_true', help='Prepend the word backbone to the key so that it can be directly loaded as a checkpoint')
args = parser.parse_args()
return args
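# Example (sketch): typical invocations, assuming the OpenAI `clip` package is
# installed and the weights are wanted either as a plain dict or as a
# backbone-prefixed checkpoint:
#   python convert_clip_weights.py --model RN50
#   python convert_clip_weights.py --model ViT16 --backbone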
if __name__ == '__main__':
args = parse_args()
name_mapping = {'RN50': 'RN50', 'RN101': 'RN101', 'RN50x4': 'RN50x4', \
'RN50x16': 'RN50x16', 'RN50x64': 'RN50x64', \
'ViT32': 'ViT-B/32', 'ViT16': 'ViT-B/16', 'ViT14': 'ViT-L/14'}
clip_model, preprocess = clip.load(name_mapping[args.model], device='cpu')
state_dict = clip_model.state_dict()
result_model = {'meta': {}, 'state_dict': {}}
all_model = dict()
stem_mapping = {'conv1': 0, 'bn1': 1, 'conv2': 3, 'bn2': 4, 'conv3': 6, 'bn3':7}
clip_keys = []
prefix = 'visual'
for key in state_dict.keys():
if 'ViT' in args.model and prefix in key:
new_key = key[len(f'{prefix}.'):]
if new_key == 'proj':
all_model['proj'] = {}
all_model['proj']['weight'] = state_dict[key].float().t()
continue
if new_key == 'class_embedding':
new_key = 'cls_token'
state_dict[key] = state_dict[key][None, None, :]
elif new_key == 'positional_embedding':
new_key = 'pos_embed'
state_dict[key] = state_dict[key][None, :, :]
elif new_key == 'conv1.weight':
new_key = 'patch_embed.projection.weight'
elif 'ln_pre' in new_key:
weight_or_bias = new_key.split('.')[-1]
new_key = f'ln0.{weight_or_bias}'
elif 'ln_post' in new_key:
weight_or_bias = new_key.split('.')[-1]
new_key = f'ln1.{weight_or_bias}'
elif 'transformer' in new_key:
new_key = 'layers.' + new_key[len('transformer.resblocks.'):]
if 'mlp' in new_key:
new_key = new_key.replace('mlp', 'ffn.layers')
if 'c_fc' in new_key:
new_key = new_key.replace('c_fc', '0.0')
if 'c_proj' in new_key:
new_key = new_key.replace('c_proj', '1')
if 'attn' in new_key:
new_key = new_key.replace('attn', 'attn.attn')
elif 'ln_' in new_key:
new_key = new_key.replace('ln_', 'ln')
if args.backbone:
new_key = 'backbone.' + new_key
clip_keys.append(new_key)
result_model['state_dict'].update({new_key: state_dict[key].float()})
elif prefix in key:
if 'attnpool' in key:
if 'proj' in key:
proj_name = key.split('.')[2]
weight_or_bias = key.split('.')[3]
if proj_name not in all_model:
all_model[proj_name] = {}
all_model[proj_name][weight_or_bias] = state_dict[key].float()
else:
new_key = key[len(f'{prefix}.'):]
if 'layer' not in new_key:
layer_name, layer_type = new_key.split('.')
new_key = 'stem.{}.{}'.format(stem_mapping[layer_name], layer_type)
if 'downsample' in new_key:
splits = new_key.split('.')
new_key = '{}.{}.{}.{}.{}'.format(splits[0], splits[1], splits[2], \
int(splits[3])+1, splits[4])
if args.backbone:
new_key = 'backbone.' + new_key
clip_keys.append(new_key)
result_model['state_dict'].update({new_key: state_dict[key].float()})
if args.backbone:
torch.save(result_model, f'{args.model}_clip_backbone.pth')
else:
all_model['clip'] = result_model['state_dict']
torch.save(all_model, '{}_clip_weights.pth'.format(args.model)) | 4,232 | 46.033333 | 160 | py |
CLIP2Scene | CLIP2Scene-main/utils/transforms.py | import torch
import random
import numpy as np
from torchvision.transforms import InterpolationMode
from torchvision.transforms import RandomResizedCrop
from torchvision.transforms.functional import resize, resized_crop, hflip
import math
class ComposeClouds:
"""
Compose multiple transformations on a point cloud.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, pc):
for transform in self.transforms:
pc = transform(pc)
return pc
class Rotation_z:
"""
Random rotation of a point cloud around the z axis.
"""
def __init__(self):
pass
def __call__(self, pc):
angle = np.random.random() * 2 * np.pi
c = np.cos(angle)
s = np.sin(angle)
R = torch.tensor(
[[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]], dtype=torch.float32
)
pc = pc @ R.T
return pc
class FlipAxis:
"""
Flip a point cloud in the x and/or y axis, with probability p for each.
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, pc):
for curr_ax in range(2):
if random.random() < self.p:
pc[:, curr_ax] = -pc[:, curr_ax]
return pc
class random_rotation_scalling_flipping:
def __init__(self, p=0.5):
self.p = p
def __call__(self, coords):
scale_flip = np.eye(3) + np.random.randn(3, 3) * 0.1
scale_flip[0][0] *= np.random.randint(0, 2) * 2 - 1
scale_flip = torch.from_numpy(scale_flip).float()
# scale = torch.eye(3)
theta = random.uniform(0, 2) * math.pi
rotationx = torch.tensor([[math.cos(theta), math.sin(theta), 0],
[-math.sin(theta), math.cos(theta), 0],
[0, 0, 1]]).float()
m = torch.matmul(scale_flip, rotationx)
coords = torch.matmul(coords.float(), m)
return coords
def make_transforms_clouds(config):
"""
Read the config file and return the desired transformation on point clouds.
"""
transforms = []
if config["transforms_clouds"] is not None:
for t in config["transforms_clouds"]:
if config['dataset'] == 'scannet' and config['mode'] == 'finetune':
transforms.append(random_rotation_scalling_flipping())
# print("sssss")
else:
if t.lower() == "rotation":
transforms.append(Rotation_z())
elif t.lower() == "flipaxis":
transforms.append(FlipAxis())
else:
raise Exception(f"Unknown transformation: {t}")
if not len(transforms):
return None
return ComposeClouds(transforms)
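# Example (sketch): a minimal config for the factory above; the keys mirror the
# ones read in this function and the values are illustrative assumptions.
# _cloud_cfg = {"transforms_clouds": ["Rotation", "FlipAxis"],
#               "dataset": "nuscenes", "mode": "pretrain"}
# cloud_transforms = make_transforms_clouds(_cloud_cfg)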
class ComposeAsymmetrical:
"""
    Compose multiple transformations on a point cloud, an image and the
    pairings between both (only available for the heavy dataset).
    Note: these transformations can increase the number of images and
    drastically modify the pairings.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, pc, features, img, pairing_points, pairing_images, superpixels=None):
for transform in self.transforms:
pc, features, img, pairing_points, pairing_images, superpixels = transform(
pc, features, img, pairing_points, pairing_images, superpixels
)
if superpixels is None:
return pc, features, img, pairing_points, pairing_images
return pc, features, img, pairing_points, pairing_images, superpixels
class ResizedCrop:
"""
Resize and crop an image, and adapt the pairings accordingly.
"""
def __init__(
self,
image_crop_size=(224, 416),
image_crop_range=[0.3, 1.0],
image_crop_ratio=(14.0 / 9.0, 17.0 / 9.0),
crop_center=False,
):
self.crop_size = image_crop_size
self.crop_range = image_crop_range
self.crop_ratio = image_crop_ratio
# self.img_interpolation = image_interpolation
self.crop_center = crop_center
def __call__(self, pc, features, images, pairing_points, pairing_images, superpixels=None):
imgs = torch.empty(
(images.shape[0], 3) + tuple(self.crop_size), dtype=torch.float32
)
if superpixels is not None:
superpixels = superpixels.unsqueeze(1)
sps = torch.empty(
(images.shape[0],) + tuple(self.crop_size), dtype=torch.uint8
)
pairing_points_out = np.empty(0, dtype=np.int64)
pairing_images_out = np.empty((0, 3), dtype=np.int64)
if self.crop_center:
pairing_points_out = pairing_points
_, _, h, w = images.shape
for id, img in enumerate(images):
mask = pairing_images[:, 0] == id
p2 = pairing_images[mask]
p2 = np.round(
np.multiply(p2, [1.0, self.crop_size[0] / h, self.crop_size[1] / w])
).astype(np.int64)
imgs[id] = resize(img, self.crop_size)
if superpixels is not None:
sps[id] = resize(
superpixels[id], self.crop_size, InterpolationMode.NEAREST
)
                p2[:, 1] = np.clip(p2[:, 1], 0, self.crop_size[0] - 1)
                p2[:, 2] = np.clip(p2[:, 2], 0, self.crop_size[1] - 1)
pairing_images_out = np.concatenate((pairing_images_out, p2))
else:
for id, img in enumerate(images):
successfull = False
mask = pairing_images[:, 0] == id
P1 = pairing_points[mask]
P2 = pairing_images[mask]
while not successfull:
i, j, h, w = RandomResizedCrop.get_params(
img, self.crop_range, self.crop_ratio
)
p1 = P1.copy()
p2 = P2.copy()
p2 = np.round(
np.multiply(
p2 - [0, i, j],
[1.0, self.crop_size[0] / h, self.crop_size[1] / w],
)
).astype(np.int64)
valid_indexes_0 = np.logical_and(
p2[:, 1] < self.crop_size[0], p2[:, 1] >= 0
)
valid_indexes_1 = np.logical_and(
p2[:, 2] < self.crop_size[1], p2[:, 2] >= 0
)
valid_indexes = np.logical_and(valid_indexes_0, valid_indexes_1)
sum_indexes = valid_indexes.sum()
len_indexes = len(valid_indexes)
if sum_indexes > 1024 or sum_indexes / len_indexes > 0.75:
successfull = True
imgs[id] = resized_crop(
img, i, j, h, w, self.crop_size
)
if superpixels is not None:
sps[id] = resized_crop(
superpixels[id],
i,
j,
h,
w,
self.crop_size,
)
pairing_points_out = np.concatenate(
(pairing_points_out, p1[valid_indexes])
)
pairing_images_out = np.concatenate(
(pairing_images_out, p2[valid_indexes])
)
if superpixels is None:
return pc, features, imgs, pairing_points_out, pairing_images_out, superpixels
return pc, features, imgs, pairing_points_out, pairing_images_out, sps
class FlipHorizontal:
"""
    Flip the image horizontally with probability p and adapt the matching accordingly.
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, pc, features, images, pairing_points, pairing_images, superpixels=None):
w = images.shape[3]
for i, img in enumerate(images):
if random.random() < self.p:
images[i] = hflip(img)
mask = pairing_images[:, 0] == i
pairing_images[mask, 2] = w - 1 - pairing_images[mask, 2]
return pc, features, images, pairing_points, pairing_images, superpixels
class DropCuboids:
"""
Drop random cuboids in a cloud
"""
def __call__(self, pc, features, images, pairing_points, pairing_images, superpixels=None):
range_xyz = torch.max(pc, axis=0)[0] - torch.min(pc, axis=0)[0]
crop_range = np.random.random() * 0.2
new_range = range_xyz * crop_range / 2.0
sample_center = pc[np.random.choice(len(pc))]
max_xyz = sample_center + new_range
min_xyz = sample_center - new_range
upper_idx = torch.sum((pc[:, 0:3] < max_xyz).to(torch.int32), 1) == 3
lower_idx = torch.sum((pc[:, 0:3] > min_xyz).to(torch.int32), 1) == 3
new_pointidx = ~((upper_idx) & (lower_idx))
pc_out = pc[new_pointidx]
features_out = features[new_pointidx]
mask = new_pointidx[pairing_points]
cs = torch.cumsum(new_pointidx, 0) - 1
pairing_points_out = pairing_points[mask]
pairing_points_out = cs[pairing_points_out]
pairing_images_out = pairing_images[mask]
successfull = True
for id in range(len(images)):
if np.sum(pairing_images_out[:, 0] == id) < 1024:
successfull = False
if successfull:
return (
pc_out,
features_out,
images,
np.array(pairing_points_out),
np.array(pairing_images_out),
)
return pc, features, images, pairing_points, pairing_images, superpixels
def make_transforms_asymmetrical(config):
"""
Read the config file and return the desired mixed transformation.
"""
transforms = []
if config["transforms_mixed"] is not None:
for t in config["transforms_mixed"]:
if t.lower() == "resizedcrop":
# pass
transforms.append(
ResizedCrop(
image_crop_size=config["crop_size"],
image_crop_ratio=config["crop_ratio"],
)
)
elif t.lower() == "fliphorizontal":
transforms.append(FlipHorizontal())
elif t.lower() == "dropcuboids":
transforms.append(DropCuboids())
else:
raise Exception(f"Unknown transformation {t}")
if not len(transforms):
return None
return ComposeAsymmetrical(transforms)
def make_transforms_asymmetrical_val(config):
"""
Read the config file and return the desired mixed transformation
for the validation only.
"""
transforms = []
if config["transforms_mixed"] is not None:
for t in config["transforms_mixed"]:
if t.lower() == "resizedcrop":
# pass
transforms.append(
ResizedCrop(image_crop_size=config["crop_size"], crop_center=True)
)
if not len(transforms):
return None
return ComposeAsymmetrical(transforms)
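# Example (sketch): a mixed-transform config matching the keys read above;
# crop size/ratio values are illustrative assumptions.
# _mixed_cfg = {"transforms_mixed": ["ResizedCrop", "FlipHorizontal", "DropCuboids"],
#               "crop_size": (224, 416), "crop_ratio": (14.0 / 9.0, 17.0 / 9.0)}
# train_mixed_transforms = make_transforms_asymmetrical(_mixed_cfg)
# val_mixed_transforms = make_transforms_asymmetrical_val(_mixed_cfg)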
| 11,427 | 33.841463 | 95 | py |
CLIP2Scene | CLIP2Scene-main/model/clip_model.py | import torch.nn as nn
import torch.nn.functional as F
import clip
class ClipFeatureExtractor(nn.Module):
"""
    CLIP image feature extractor (frozen ViT-B/32 visual encoder).
"""
def __init__(self, config, preprocessing=None):
super(ClipFeatureExtractor, self).__init__()
self.encoder, preprocess = clip.load("ViT-B/32", device="cuda")
for param in self.encoder.parameters():
param.requires_grad = False
# self.decoder = nn.Sequential(
# nn.Conv2d(embed_dim, config["model_n_out"], 1),
# nn.Upsample(scale_factor=patch_size, mode="bilinear", align_corners=True),
# )
self.preprocessing = preprocess
self.normalize_feature = config["normalize_features"]
def forward(self, x):
if self.preprocessing:
x = self.preprocessing(x)
        # Use the CLIP visual branch only; the full CLIP forward expects (image, text).
        x = self.encoder.encode_image(x)
        # encode_image returns one global embedding per image: [batch_size x embed_dim]
if self.normalize_feature:
x = F.normalize(x, p=2, dim=1)
return x
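# Example (sketch): extracting one CLIP embedding per camera image.
# The config key mirrors the one read above; device and shapes are illustrative.
# _cfg = {"normalize_features": True}
# extractor = ClipFeatureExtractor(_cfg).cuda()
# feats = extractor(torch.rand(2, 3, 224, 224).cuda())  # -> (2, 512) for ViT-B/32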
| 1,158 | 28.717949 | 92 | py |
CLIP2Scene | CLIP2Scene-main/model/image_model.py | import os
import torch
import requests
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import model_urls
from model.modules.resnet_encoder import resnet_encoders
import model.modules.dino.vision_transformer as dino_vit
import clip
_MEAN_PIXEL_IMAGENET = [0.485, 0.456, 0.406]
_STD_PIXEL_IMAGENET = [0.229, 0.224, 0.225]
def adapt_weights(architecture):
if architecture == "imagenet" or architecture is None:
return
weights_url = {
"moco_v2": "https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v2_800ep/moco_v2_800ep_pretrain.pth.tar",
"moco_v1": "https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v1_200ep/moco_v1_200ep_pretrain.pth.tar",
"swav": "https://dl.fbaipublicfiles.com/deepcluster/swav_800ep_pretrain.pth.tar",
"deepcluster_v2": "https://dl.fbaipublicfiles.com/deepcluster/deepclusterv2_800ep_pretrain.pth.tar",
"dino": "https://dl.fbaipublicfiles.com/dino/dino_resnet50_pretrain/dino_resnet50_pretrain.pth"
}
if not os.path.exists(f"weights/{architecture}.pt"):
r = requests.get(weights_url[architecture], allow_redirects=True)
os.makedirs("weights", exist_ok=True)
with open(f"weights/{architecture}.pt", 'wb') as f:
f.write(r.content)
weights = torch.load(f"weights/{architecture}.pt")
if architecture == "obow":
return weights["network"]
if architecture == "pixpro":
weights = {
k.replace("module.encoder.", ""): v
for k, v in weights["model"].items()
if k.startswith("module.encoder.")
}
return weights
if architecture in ("moco_v1", "moco_v2", "moco_coco"):
weights = {
k.replace("module.encoder_q.", ""): v
for k, v in weights["state_dict"].items()
if k.startswith("module.encoder_q.") and not k.startswith("module.encoder_q.fc")
}
return weights
if architecture in ("swav", "deepcluster_v2"):
weights = {
k.replace("module.", ""): v
for k, v in weights.items()
if k.startswith("module.") and not k.startswith("module.pro")
}
return weights
if architecture == "dino":
return weights
class Preprocessing:
"""
Use the ImageNet preprocessing.
"""
def __init__(self):
normalize = T.Normalize(mean=_MEAN_PIXEL_IMAGENET, std=_STD_PIXEL_IMAGENET)
self.preprocessing_img = normalize
def __call__(self, image):
return self.preprocessing_img(image)
class DilationFeatureExtractor(nn.Module):
"""
Dilated ResNet Feature Extractor
"""
def __init__(self, config, preprocessing=None):
super(DilationFeatureExtractor, self).__init__()
assert (
config["images_encoder"] == "resnet50"
), "DilationFeatureExtractor is only available for resnet50"
Encoder = resnet_encoders["resnet50"]["encoder"]
params = resnet_encoders["resnet50"]["params"]
params.update(replace_stride_with_dilation=[True, True, True])
self.encoder = Encoder(**params)
if config["image_weights"] == "imagenet":
self.encoder.load_state_dict(model_zoo.load_url(model_urls["resnet50"]))
weights = adapt_weights(architecture=config["image_weights"])
if weights is not None:
self.encoder.load_state_dict(weights)
for param in self.encoder.parameters():
param.requires_grad = False
in1 = 2048
self.decoder = nn.Sequential(
nn.Conv2d(in1, config["model_n_out"], 1),
nn.Upsample(scale_factor=4, mode="bilinear", align_corners=True),
)
self.preprocessing = preprocessing
self.normalize_feature = config["normalize_features"]
self.channel_avgpooling = nn.AvgPool2d((32, 1), stride=(32, 1))
self.upsample4 = nn.Upsample(scale_factor=4, mode="bilinear", align_corners=True)
def forward(self, x):
if self.preprocessing:
x = self.preprocessing(x)
x = self.encoder(x)
# x = self.channel_avgpooling(x.permute(0, 2, 1, 3))
# x = self.upsample4(x.permute(0, 2, 1, 3))
x = self.decoder(x)
if self.normalize_feature:
x = F.normalize(x, p=2, dim=1)
return x
class PPKTFeatureExtractor(nn.Module):
"""
PPKT baseline
"""
def __init__(self, config, preprocessing=None):
super(PPKTFeatureExtractor, self).__init__()
Encoder = resnet_encoders[config["images_encoder"]]["encoder"]
params = resnet_encoders[config["images_encoder"]]["params"]
self.encoder = Encoder(**params)
if config["image_weights"] == "imagenet":
self.encoder.load_state_dict(model_zoo.load_url(model_urls[config["images_encoder"]]))
if config["image_weights"] not in (None, "imagenet"):
assert (
config["images_encoder"] == "resnet50"
), "{} weights are only available for resnet50".format(
config["images_weights"]
)
weights = adapt_weights(architecture=config["image_weights"])
if weights is not None:
self.encoder.load_state_dict(weights)
for param in self.encoder.parameters():
param.requires_grad = False
if config["images_encoder"] == "resnet18":
in1 = 512
elif config["images_encoder"] == "resnet50":
in1 = 2048
self.decoder = nn.Sequential(
nn.Conv2d(in1, config["model_n_out"], 1),
nn.Upsample(scale_factor=32, mode="bilinear", align_corners=True),
)
self.preprocessing = preprocessing
self.normalize_feature = config["normalize_features"]
def forward(self, x):
if self.preprocessing:
x = self.preprocessing(x)
x = self.decoder(self.encoder(x))
if self.normalize_feature:
x = F.normalize(x, p=2, dim=1)
return x
class DinoVitFeatureExtractor(nn.Module):
"""
DINO Vision Transformer Feature Extractor.
"""
def __init__(self, config, preprocessing=None):
super(DinoVitFeatureExtractor, self).__init__()
dino_models = {
"vit_small_p16": ("vit_small", 16, 384),
"vit_small_p8": ("vit_small", 8, 384),
"vit_base_p16": ("vit_base", 16, 768),
"vit_base_p8": ("vit_base", 8, 768),
}
assert (
config["images_encoder"] in dino_models.keys()
), f"DilationFeatureExtractor is only available for {dino_models.keys()}"
model_name, patch_size, embed_dim = dino_models[config["images_encoder"]]
print("Use Vision Transformer pretrained with DINO as the image encoder")
print(f"==> model_name: {model_name}")
print(f"==> patch_size: {patch_size}")
print(f"==> embed_dim: {embed_dim}")
self.patch_size = patch_size
self.embed_dim = embed_dim
self.encoder = dino_vit.__dict__[model_name](patch_size=patch_size, num_classes=0)
dino_vit.load_pretrained_weights(self.encoder, "", None, model_name, patch_size)
for param in self.encoder.parameters():
param.requires_grad = False
self.decoder = nn.Sequential(
nn.Conv2d(embed_dim, config["model_n_out"], 1),
nn.Upsample(scale_factor=patch_size, mode="bilinear", align_corners=True),
)
self.preprocessing = preprocessing
self.normalize_feature = config["normalize_features"]
def forward(self, x):
if self.preprocessing:
x = self.preprocessing(x)
batch_size, _, height, width = x.size()
assert (height % self.patch_size) == 0
assert (width % self.patch_size) == 0
f_height = height // self.patch_size
f_width = width // self.patch_size
x = self.encoder(x, all=True)
# the output of x should be [batch_size x (1 + f_height * f_width) x self.embed_dim]
assert x.size(1) == (1 + f_height * f_width)
# Remove the CLS token and reshape the the patch token features.
x = x[:, 1:, :].contiguous().transpose(1, 2).contiguous().view(batch_size, self.embed_dim, f_height, f_width)
x = self.decoder(x)
if self.normalize_feature:
x = F.normalize(x, p=2, dim=1)
return x
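# Example (sketch): building the DINO ViT extractor from a minimal config; the
# keys mirror the ones read above and the values are illustrative assumptions.
# _cfg = {"images_encoder": "vit_small_p16", "model_n_out": 64,
#         "normalize_features": True}
# model = DinoVitFeatureExtractor(_cfg, preprocessing=Preprocessing())
# out = model(torch.rand(2, 3, 224, 416))  # -> (2, 64, 224, 416)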
| 8,571 | 34.27572 | 119 | py |
CLIP2Scene | CLIP2Scene-main/model/fusionNet.py | import os
import torch
import requests
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import model_urls
from model.modules.resnet_encoder import resnet_encoders
import model.modules.dino.vision_transformer as dino_vit
class fusionNet(nn.Module):
"""
    Text-guided fusion of per-pixel and per-point features.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.text_embeddings_path = self.config['text_embeddings_path']
text_categories = self.config['text_categories']
if self.text_embeddings_path is None:
self.text_embeddings = nn.Parameter(torch.zeros(text_categories, 512))
nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)
else:
self.register_buffer('text_embeddings', torch.randn(text_categories, 512))
loaded = torch.load(self.text_embeddings_path, map_location='cuda')
self.text_embeddings[:, :] = loaded[:, :]
self.img_size = (224, 416)
self.t = 1
def forward(self, feature_packages):
# feature_packages size: voxelSize * 8 * 1537
# pixel_feature, point_feature, text_embedding, pred = feature_packages[:, :, :512], feature_packages[:, :, 512:1024], feature_packages[:, :, 1024:1536], feature_packages[:, :, -1]
pixel_feature, point_feature, pred = feature_packages[:, :, :512], feature_packages[:, :, 512:1024], feature_packages[:, :, -1]
pixel_pred = pred[:, 0].long()
text_embedding = self.text_embeddings[pixel_pred].unsqueeze(1)
pixel_point_feature = point_feature
pixel_point_attention = torch.sum(pixel_point_feature * text_embedding, dim=2)
index_point_sum = torch.sum(pixel_point_attention, dim=1) != 0
pixel_point_attention = pixel_point_attention[index_point_sum] / self.t
pixel_point_feature = pixel_point_feature[index_point_sum]
pixel_pred = pixel_pred[index_point_sum]
attention_union_sparse = pixel_point_attention.to_sparse()
attention_union_dense = torch.sparse.softmax(attention_union_sparse, dim=1).to_dense()
fusion_feature = torch.sum(attention_union_dense.unsqueeze(-1) * pixel_point_feature, dim=1)
inner_products = torch.sigmoid(torch.sum(fusion_feature.unsqueeze(1) * pixel_point_feature, dim=2))
return fusion_feature, inner_products, pixel_pred
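# Example (sketch): feature_packages is expected to be (num_voxels, num_views, C)
# with the first 512 channels holding pixel features, the next 512 point features
# and the last channel a per-pixel class prediction, e.g.
# _cfg = {"text_embeddings_path": None, "text_categories": 16}
# fusion = fusionNet(_cfg)
# fused, scores, preds = fusion(torch.rand(128, 8, 1025))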
| 2,487 | 42.649123 | 188 | py |
CLIP2Scene | CLIP2Scene-main/model/resnet.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# https://arxiv.org/abs/2007.10985
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine import MinkowskiNetwork
from model.modules.common import ConvType, NormType, get_norm, conv, sum_pool
class Model(MinkowskiNetwork):
OUT_PIXEL_DIST = -1
def __init__(self, in_channels, out_channels, config, D, **kwargs):
super(Model, self).__init__(D)
self.in_channels = in_channels
self.out_channels = out_channels
self.config = config
class ResNetBase(Model):
BLOCK = None
LAYERS = ()
INIT_DIM = 64
PLANES = (64, 128, 256, 512)
OUT_PIXEL_DIST = 32
HAS_LAST_BLOCK = False
CONV_TYPE = ConvType.HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
assert self.BLOCK is not None
assert self.OUT_PIXEL_DIST > 0
super(ResNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
self.network_initialization(in_channels, out_channels, config, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, config, D):
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
dilations = config.dilations
bn_momentum = config.opt.bn_momentum
self.inplanes = self.INIT_DIM
self.conv1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config.conv1_kernel_size, 1),
stride=1,
D=D,
)
self.bn1 = get_norm(
NormType.BATCH_NORM, self.inplanes, D=self.D, bn_momentum=bn_momentum
)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = sum_pool(
kernel_size=space_n_time_m(2, 1), stride=space_n_time_m(2, 1), D=D
)
self.layer1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[0], 1),
)
self.layer2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[1], 1),
)
self.layer3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[2], 1),
)
self.layer4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
stride=space_n_time_m(2, 1),
dilation=space_n_time_m(dilations[3], 1),
)
self.final = conv(
self.PLANES[3] * self.BLOCK.expansion,
out_channels,
kernel_size=1,
bias=True,
D=D,
)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(
self,
block,
planes,
blocks,
stride=1,
dilation=1,
norm_type=NormType.BATCH_NORM,
bn_momentum=0.1,
):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
D=self.D,
),
get_norm(
norm_type,
planes * block.expansion,
D=self.D,
bn_momentum=bn_momentum,
),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
conv_type=self.CONV_TYPE,
D=self.D,
)
)
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
conv_type=self.CONV_TYPE,
D=self.D,
)
)
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.final(x)
return x
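# Example (sketch): concrete networks are built by subclassing ResNetBase and
# picking a block/depth; BasicBlock here stands for the repo's residual block
# from model.modules (an assumption for illustration):
# class ResNet14(ResNetBase):
#     BLOCK = BasicBlock
#     LAYERS = (1, 1, 1, 1)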
| 5,183 | 28.123596 | 88 | py |
CLIP2Scene | CLIP2Scene-main/model/spconv_backbone.py | from functools import partial
import numpy as np
import spconv
import torch.nn as nn
def post_act_block(
in_channels,
out_channels,
kernel_size,
indice_key=None,
stride=1,
padding=0,
conv_type="subm",
norm_fn=None,
):
if conv_type == "subm":
conv = spconv.SubMConv3d(
in_channels, out_channels, kernel_size, bias=False, indice_key=indice_key
)
elif conv_type == "spconv":
conv = spconv.SparseConv3d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
indice_key=indice_key,
)
elif conv_type == "inverseconv":
conv = spconv.SparseInverseConv3d(
in_channels, out_channels, kernel_size, indice_key=indice_key, bias=False
)
elif conv_type == "transposeconv":
conv = spconv.SparseConvTranspose3d(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, bias=False, indice_key=indice_key
)
else:
raise NotImplementedError
m = spconv.SparseSequential(
conv,
norm_fn(out_channels),
nn.ReLU(),
)
return m
class SparseBasicBlock(spconv.SparseModule):
expansion = 1
def __init__(
self, inplanes, planes, stride=1, norm_fn=None, downsample=None, indice_key=None
):
super(SparseBasicBlock, self).__init__()
assert norm_fn is not None
bias = norm_fn is not None
self.conv1 = spconv.SubMConv3d(
inplanes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn1 = norm_fn(planes)
self.relu = nn.ReLU()
self.conv2 = spconv.SubMConv3d(
planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=bias,
indice_key=indice_key,
)
self.bn2 = norm_fn(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out.features = self.bn1(out.features)
out.features = self.relu(out.features)
out = self.conv2(out)
out.features = self.bn2(out.features)
if self.downsample is not None:
identity = self.downsample(x)
out.features += identity.features
out.features = self.relu(out.features)
return out
class VoxelBackBone8x(nn.Module):
def __init__(self, input_channels, grid_size, **kwargs):
super().__init__()
norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
self.sparse_shape = grid_size[::-1] + [1, 0, 0]
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(
input_channels, 16, 3, padding=1, bias=False, indice_key="subm1"
),
norm_fn(16),
nn.ReLU(),
)
block = post_act_block
self.conv1 = spconv.SparseSequential(
block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key="subm1"),
)
self.conv2 = spconv.SparseSequential(
# [1600, 1408, 41] <- [800, 704, 21]
block(
16,
32,
3,
norm_fn=norm_fn,
stride=2,
padding=1,
indice_key="spconv2",
conv_type="spconv",
),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key="subm2"),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key="subm2"),
)
self.conv3 = spconv.SparseSequential(
# [800, 704, 21] <- [400, 352, 11]
block(
32,
64,
3,
norm_fn=norm_fn,
stride=2,
padding=1,
indice_key="spconv3",
conv_type="spconv",
),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm3"),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm3"),
)
self.conv4 = spconv.SparseSequential(
# [400, 352, 11] <- [200, 176, 5]
block(
64,
64,
3,
norm_fn=norm_fn,
stride=2,
padding=(0, 1, 1),
indice_key="spconv4",
conv_type="spconv",
),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm4"),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key="subm4"),
)
last_pad = 0
self.conv_out = spconv.SparseSequential(
# [200, 150, 5] -> [200, 150, 2]
spconv.SparseConv3d(
64,
128,
(3, 1, 1),
stride=(2, 1, 1),
padding=last_pad,
bias=False,
indice_key="spconv_down2",
),
norm_fn(128),
nn.ReLU(),
)
self.num_point_features = 128
self.backbone_channels = {
"x_conv1": 16,
"x_conv2": 32,
"x_conv3": 64,
"x_conv4": 64,
}
def forward(self, input_sp_tensor):
"""
        Args:
            input_sp_tensor: spconv.SparseConvTensor built from per-voxel
                features and (num_voxels, 4) coords [batch_idx, z_idx, y_idx, x_idx]
        Returns:
            encoded_spconv_tensor: sparse tensor after 8x spatial downsampling
"""
x = self.conv_input(input_sp_tensor)
x_conv1 = self.conv1(x)
x_conv2 = self.conv2(x_conv1)
x_conv3 = self.conv3(x_conv2)
x_conv4 = self.conv4(x_conv3)
out = self.conv_out(x_conv4)
return out
class VoxelResBackBone8x(nn.Module):
def __init__(self, input_channels, grid_size, **kwargs):
super().__init__()
norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
self.sparse_shape = grid_size[::-1] + [1, 0, 0]
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(
input_channels, 16, 3, padding=1, bias=False, indice_key="subm1"
),
norm_fn(16),
nn.ReLU(),
)
block = post_act_block
self.conv1 = spconv.SparseSequential(
SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key="res1"),
SparseBasicBlock(16, 16, norm_fn=norm_fn, indice_key="res1"),
)
self.conv2 = spconv.SparseSequential(
# [1600, 1408, 41] <- [800, 704, 21]
block(
16,
32,
3,
norm_fn=norm_fn,
stride=2,
padding=1,
indice_key="spconv2",
conv_type="spconv",
),
SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key="res2"),
SparseBasicBlock(32, 32, norm_fn=norm_fn, indice_key="res2"),
)
self.conv3 = spconv.SparseSequential(
# [800, 704, 21] <- [400, 352, 11]
block(
32,
64,
3,
norm_fn=norm_fn,
stride=2,
padding=1,
indice_key="spconv3",
conv_type="spconv",
),
SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key="res3"),
SparseBasicBlock(64, 64, norm_fn=norm_fn, indice_key="res3"),
)
self.conv4 = spconv.SparseSequential(
# [400, 352, 11] <- [200, 176, 5]
block(
64,
128,
3,
norm_fn=norm_fn,
stride=2,
padding=(0, 1, 1),
indice_key="spconv4",
conv_type="spconv",
),
SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key="res4"),
SparseBasicBlock(128, 128, norm_fn=norm_fn, indice_key="res4"),
)
last_pad = 0
self.conv_out = spconv.SparseSequential(
# [200, 150, 5] -> [200, 150, 2]
spconv.SparseConv3d(
128,
128,
(3, 1, 1),
stride=(2, 1, 1),
padding=last_pad,
bias=False,
indice_key="spconv_down2",
),
norm_fn(128),
nn.ReLU(),
)
self.num_point_features = 128
self.backbone_channels = {
"x_conv1": 16,
"x_conv2": 32,
"x_conv3": 64,
"x_conv4": 128,
}
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size: int
vfe_features: (num_voxels, C)
voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
Returns:
batch_dict:
encoded_spconv_tensor: sparse tensor
"""
voxel_features, voxel_coords = (
batch_dict["voxel_features"],
batch_dict["voxel_coords"],
)
batch_size = batch_dict["batch_size"]
input_sp_tensor = spconv.SparseConvTensor(
features=voxel_features,
indices=voxel_coords.int(),
spatial_shape=self.sparse_shape,
batch_size=batch_size,
)
x = self.conv_input(input_sp_tensor)
x_conv1 = self.conv1(x)
x_conv2 = self.conv2(x_conv1)
x_conv3 = self.conv3(x_conv2)
x_conv4 = self.conv4(x_conv3)
# for detection head
# [200, 176, 5] -> [200, 176, 2]
out = self.conv_out(x_conv4)
batch_dict.update(
{"encoded_spconv_tensor": out, "encoded_spconv_tensor_stride": 8}
)
batch_dict.update(
{
"multi_scale_3d_features": {
"x_conv1": x_conv1,
"x_conv2": x_conv2,
"x_conv3": x_conv3,
"x_conv4": x_conv4,
}
}
)
return batch_dict
class HeightCompression(nn.Module):
def __init__(self, **kwargs):
super().__init__()
def forward(self, encoded_spconv_tensor):
"""
        Args:
            encoded_spconv_tensor: sparse tensor from the 3D backbone
        Returns:
            spatial_features: dense BEV features of shape (N, C * D, H, W)
"""
# encoded_spconv_tensor = batch_dict['encoded_spconv_tensor']
spatial_features = encoded_spconv_tensor.dense()
N, C, D, H, W = spatial_features.shape
spatial_features = spatial_features.view(N, C * D, H, W)
return spatial_features
class VoxelNet(VoxelBackBone8x):
def __init__(self, in_channels, out_channels, config, D=3):
self.bev_stride = 8
voxel_size = [0.1, 0.1, 0.2] # nuScenes
point_cloud_range = np.array([-51.2, -51.2, -5.0, 51.2, 51.2, 3.0], dtype=np.float32) # nuScenes
self.grid_size = ((point_cloud_range[3:] - point_cloud_range[:3]) / voxel_size).astype(int)[::-1]
self.bach_size = config["batch_size"]
super().__init__(in_channels, self.grid_size)
self.final = spconv.SparseConv3d(
128,
out_channels // 1,
1,
stride=1,
padding=0,
bias=False,
indice_key="final",
)
self.height_compression = HeightCompression()
def forward(self, voxels, coordinates):
sp_tensor = spconv.SparseConvTensor(
features=voxels,
indices=coordinates,
spatial_shape=self.grid_size,
batch_size=self.bach_size
)
sp_tensor = super(VoxelNet, self).forward(sp_tensor)
sp_tensor = self.final(sp_tensor)
sp_tensor = self.height_compression(sp_tensor)
return sp_tensor
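# Example (sketch): running the voxel backbone on dummy voxelized input; the
# config key and tensor shapes are illustrative assumptions.
# _cfg = {"batch_size": 2}
# net = VoxelNet(in_channels=4, out_channels=128, config=_cfg)
# `voxels` is (N, 4) per-voxel features, `coordinates` is an (N, 4) int tensor
# [batch_idx, z_idx, y_idx, x_idx]; the output is a dense BEV map (B, C*D, H, W).
# bev = net(voxels, coordinates)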
| 12,158 | 28.512136 | 117 | py |
CLIP2Scene | CLIP2Scene-main/model/spvcnn.py | import torchsparse
import torchsparse.nn as spnn
import torchsparse.nn.functional as F
from torchsparse import PointTensor, SparseTensor
from torchsparse.nn.utils import fapply, get_kernel_offsets
import torch
import torch.nn as nn
from torch.nn import functional as F1
import numpy as np
import pickle
import os
# from .range_utils import resample_grid_stacked
# import range_utils.nn.functional as rnf
# z: PointTensor
# return: SparseTensor
def initial_voxelize(z, init_res, after_res):
new_float_coord = torch.cat(
[(z.C[:, :3] * init_res) / after_res, z.C[:, -1].view(-1, 1)], 1)
pc_hash = F.sphash(torch.floor(new_float_coord).int())
sparse_hash = torch.unique(pc_hash)
idx_query = F.sphashquery(pc_hash, sparse_hash)
counts = F.spcount(idx_query.int(), len(sparse_hash))
inserted_coords = F.spvoxelize(torch.floor(new_float_coord), idx_query,
counts)
inserted_coords = torch.round(inserted_coords).int()
inserted_feat = F.spvoxelize(z.F, idx_query, counts)
new_tensor = SparseTensor(inserted_feat, inserted_coords, 1)
new_tensor.cmaps.setdefault(new_tensor.stride, new_tensor.coords)
z.additional_features['idx_query'][1] = idx_query
z.additional_features['counts'][1] = counts
z.C = new_float_coord
return new_tensor
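# A minimal sketch of initial_voxelize, assuming point features `feats` (M, C)
# and coordinates `coords` (M, 4) with xyz first and the batch index last:
#
#   z = PointTensor(feats, coords.float())
#   x0 = initial_voxelize(z, init_res=0.05, after_res=0.05)  # SPVCNN uses pres/vres = 0.05
#   # x0 is a SparseTensor whose features pool (average) the points falling into
#   # each voxel; the point-to-voxel mapping is cached in z.additional_features.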
# x: SparseTensor, z: PointTensor
# return: SparseTensor
def point_to_voxel(x, z):
if z.additional_features is None or z.additional_features.get(
'idx_query') is None or z.additional_features['idx_query'].get(
x.s) is None:
pc_hash = F.sphash(
torch.cat([
torch.floor(z.C[:, :3] / x.s[0]).int() * x.s[0],
z.C[:, -1].int().view(-1, 1)
], 1))
sparse_hash = F.sphash(x.C)
idx_query = F.sphashquery(pc_hash, sparse_hash)
counts = F.spcount(idx_query.int(), x.C.shape[0])
z.additional_features['idx_query'][x.s] = idx_query
z.additional_features['counts'][x.s] = counts
else:
idx_query = z.additional_features['idx_query'][x.s]
counts = z.additional_features['counts'][x.s]
inserted_feat = F.spvoxelize(z.F, idx_query, counts)
new_tensor = SparseTensor(inserted_feat, x.C, x.s)
new_tensor.cmaps = x.cmaps
new_tensor.kmaps = x.kmaps
return new_tensor
# x: SparseTensor, z: PointTensor
# return: PointTensor
def voxel_to_point(x, z, nearest=False):
if z.idx_query is None or z.weights is None or z.idx_query.get(
x.s) is None or z.weights.get(x.s) is None:
off = get_kernel_offsets(2, x.s, 1, device=z.F.device)
old_hash = F.sphash(
torch.cat([
torch.floor(z.C[:, :3] / x.s[0]).int() * x.s[0],
z.C[:, -1].int().view(-1, 1)
], 1), off)
pc_hash = F.sphash(x.C.to(z.F.device))
idx_query = F.sphashquery(old_hash, pc_hash)
weights = F.calc_ti_weights(z.C, idx_query,
scale=x.s[0]).transpose(0, 1).contiguous()
idx_query = idx_query.transpose(0, 1).contiguous()
if nearest:
weights[:, 1:] = 0.
idx_query[:, 1:] = -1
new_feat = F.spdevoxelize(x.F, idx_query, weights)
new_tensor = PointTensor(new_feat,
z.C,
idx_query=z.idx_query,
weights=z.weights)
new_tensor.additional_features = z.additional_features
new_tensor.idx_query[x.s] = idx_query
new_tensor.weights[x.s] = weights
z.idx_query[x.s] = idx_query
z.weights[x.s] = weights
else:
new_feat = F.spdevoxelize(x.F, z.idx_query.get(x.s), z.weights.get(x.s))
new_tensor = PointTensor(new_feat,
z.C,
idx_query=z.idx_query,
weights=z.weights)
new_tensor.additional_features = z.additional_features
return new_tensor
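# point_to_voxel / voxel_to_point form the round trip used throughout SPVCNN:
# point features are scattered onto the current voxel grid before each sparse
# conv stage, and voxel features are tri-linearly interpolated back onto the
# points afterwards; the query indices and weights are cached on the
# PointTensor so repeated calls at the same stride stay cheap. Sketch, reusing
# x0 and z from the initial_voxelize example above:
#
#   x1 = point_to_voxel(x0, z)  # PointTensor -> SparseTensor on x0's grid
#   z1 = voxel_to_point(x1, z)  # SparseTensor -> per-point features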
save_ceph = False
if save_ceph:
from petrel_client.client import Client
ceph_client = Client()
__all__ = ['SPVCNN']
class SyncBatchNorm(nn.SyncBatchNorm):
def forward(self, input: SparseTensor) -> SparseTensor:
return fapply(input, super().forward)
class BasicConvolutionBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride),
SyncBatchNorm(outc),
spnn.ReLU(True),
)
def forward(self, x):
out = self.net(x)
return out
class BasicDeconvolutionBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
stride=stride,
transposed=True),
SyncBatchNorm(outc),
spnn.ReLU(True),
)
def forward(self, x):
return self.net(x)
class ResidualBlock(nn.Module):
expansion = 1
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride),
SyncBatchNorm(outc),
spnn.ReLU(True),
spnn.Conv3d(outc, outc, kernel_size=ks, dilation=dilation,
stride=1),
SyncBatchNorm(outc),
)
if inc == outc * self.expansion and stride == 1:
self.downsample = nn.Identity()
else:
self.downsample = nn.Sequential(
spnn.Conv3d(inc, outc * self.expansion, kernel_size=1, dilation=1,
stride=stride),
SyncBatchNorm(outc * self.expansion),
)
self.relu = spnn.ReLU(True)
def forward(self, x):
out = self.relu(self.net(x) + self.downsample(x))
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc, outc, 1, bias=False),
SyncBatchNorm(outc),
spnn.Conv3d(outc, outc, ks, stride, bias=False, dilation=dilation),
SyncBatchNorm(outc),
spnn.Conv3d(outc, outc * self.expansion, 1, bias=False),
SyncBatchNorm(outc * self.expansion)
)
if inc == outc * self.expansion and stride == 1:
self.downsample = nn.Identity()
else:
self.downsample = nn.Sequential(
spnn.Conv3d(inc, outc * self.expansion, kernel_size=1, dilation=1,
stride=stride),
SyncBatchNorm(outc * self.expansion),
)
self.relu = spnn.ReLU(True)
def forward(self, x):
out = self.relu(self.net(x) + self.downsample(x))
return out
class BaseSegmentor(nn.Module):
def __init__(self, model_cfg, num_class):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
# self.dataset = dataset
# self.class_names = dataset.class_names
def load_params(self, model_state_disk, strict=False):
my_model_dict = self.state_dict()
part_load = {}
for k in model_state_disk.keys():
value = model_state_disk[k]
if k.startswith("module."):
k = k[len("module."):]
if k in my_model_dict and my_model_dict[k].shape == value.shape:
part_load[k] = value
return self.load_state_dict(part_load, strict=strict)
def load_params_from_file(self, filename, logger, to_cpu=False):
if not os.path.isfile(filename):
raise FileNotFoundError
logger.info('==> Loading parameters from checkpoint %s to %s' % (filename, 'CPU' if to_cpu else 'GPU'))
loc_type = torch.device('cpu') if to_cpu else None
model_state_disk = torch.load(filename, map_location=loc_type)
if 'model_state' in model_state_disk:
model_state_disk = model_state_disk['model_state']
msg = self.load_params(model_state_disk)
logger.info(f"==> Done {msg}")
def forward(self, batch_dict):
raise NotImplementedError
class SPVCNN(nn.Module):
def _make_layer(self, block, out_channels, num_block, stride=1):
layers = []
layers.append(block(self.in_channels, out_channels, stride=stride))
self.in_channels = out_channels * block.expansion
for _ in range(1, num_block):
layers.append(block(self.in_channels, out_channels))
return layers
# (self, in_channels, out_channels, config, D=3):
# def __init__(self, model_cfg, num_class, dataset=None):
def __init__(self, in_channels, num_class, config):
super().__init__()
self.name = "spvcnn"
self.in_feature_dim = in_channels
self.num_class = num_class
self.config = config
# Default is MinkUNet50
# self.num_layer = model_cfg.get('NUM_LAYER', [2, 3, 4, 6, 2, 2, 2, 2])
# [2, 3, 4, 6, 2, 2, 2, 2]
self.num_layer = [2, 2, 2, 2, 2, 2, 2, 2]
# self.num_layer = [2, 3, 4, 6, 2, 2, 2, 2]
self.block = ResidualBlock
# self.block = {
# 'ResBlock': ResidualBlock,
# 'Bottleneck': Bottleneck,
# }[model_cfg.get('BLOCK', 'Bottleneck')]
cr = 1
# cs = model_cfg.get('PLANES', [32, 32, 64, 128, 256, 256, 128, 96, 96])
cs = [32, 32, 64, 128, 256, 256, 128, 96, 96]
cs = [int(cr * x) for x in cs]
self.pres = 0.05
self.vres = 0.05
self.stem = nn.Sequential(
spnn.Conv3d(self.in_feature_dim, cs[0], kernel_size=3, stride=1),
SyncBatchNorm(cs[0]), spnn.ReLU(True),
spnn.Conv3d(cs[0], cs[0], kernel_size=3, stride=1),
SyncBatchNorm(cs[0]), spnn.ReLU(True))
self.in_channels = cs[0]
self.stage1 = nn.Sequential(
BasicConvolutionBlock(self.in_channels, self.in_channels, ks=2, stride=2, dilation=1),
*self._make_layer(self.block, cs[1], self.num_layer[0]),
)
self.stage2 = nn.Sequential(
BasicConvolutionBlock(self.in_channels, self.in_channels, ks=2, stride=2, dilation=1),
*self._make_layer(self.block, cs[2], self.num_layer[1]),
)
self.stage3 = nn.Sequential(
BasicConvolutionBlock(self.in_channels, self.in_channels, ks=2, stride=2, dilation=1),
*self._make_layer(self.block, cs[3], self.num_layer[2]),
)
self.stage4 = nn.Sequential(
BasicConvolutionBlock(self.in_channels, self.in_channels, ks=2, stride=2, dilation=1),
*self._make_layer(self.block, cs[4], self.num_layer[3]),
)
self.up1 = [BasicDeconvolutionBlock(self.in_channels, cs[5], ks=2, stride=2)]
self.in_channels = cs[5] + cs[3] * self.block.expansion
self.up1.append(nn.Sequential(*self._make_layer(self.block, cs[5], self.num_layer[4])))
self.up1 = nn.ModuleList(self.up1)
self.up2 = [BasicDeconvolutionBlock(self.in_channels, cs[6], ks=2, stride=2)]
self.in_channels = cs[6] + cs[2] * self.block.expansion
self.up2.append(nn.Sequential(*self._make_layer(self.block, cs[6], self.num_layer[5])))
self.up2 = nn.ModuleList(self.up2)
self.up3 = [BasicDeconvolutionBlock(self.in_channels, cs[7], ks=2, stride=2)]
self.in_channels = cs[7] + cs[1] * self.block.expansion
self.up3.append(nn.Sequential(*self._make_layer(self.block, cs[7], self.num_layer[6])))
self.up3 = nn.ModuleList(self.up3)
self.up4 = [BasicDeconvolutionBlock(self.in_channels, cs[8], ks=2, stride=2)]
self.in_channels = cs[8] + cs[0]
self.up4.append(nn.Sequential(*self._make_layer(self.block, cs[8], self.num_layer[7])))
self.up4 = nn.ModuleList(self.up4)
# self.multi_scale = self.model_cfg.get('MULTI_SCALE', 'concat')
self.multi_scale = 'concat'
if self.multi_scale == 'concat':
self.classifier = nn.Sequential(nn.Linear((cs[4] + cs[6] + cs[8]) * self.block.expansion, self.num_class))
elif self.multi_scale == 'sum':
raise Exception('obsolete')
self.l1 = nn.Linear(cs[4] * self.block.expansion, cs[8] * self.block.expansion)
self.l2 = nn.Linear(cs[6] * self.block.expansion, cs[8] * self.block.expansion)
self.classifier = nn.Sequential(nn.Linear(cs[8] * self.block.expansion + (23 if self.concatattheend else 0), self.num_class))
elif self.multi_scale == 'se':
raise Exception('obsolete')
self.pool = nn.AdaptiveMaxPool1d(1)
self.attn = nn.Sequential(
nn.Linear((cs[4] + cs[6] + cs[8]) * self.block.expansion + (23 if self.concatattheend else 0), cs[8] * self.block.expansion, bias=False),
nn.ReLU(True),
nn.Linear(cs[8] * self.block.expansion, (cs[4] + cs[6] + cs[8]) * self.block.expansion + (23 if self.concatattheend else 0), bias=False),
nn.Sigmoid(),
)
self.classifier = nn.Sequential(nn.Linear((cs[4] + cs[6] + cs[8]) * self.block.expansion + (23 if self.concatattheend else 0), self.num_class))
else:
self.classifier = nn.Sequential(nn.Linear(cs[8] * self.block.expansion + (23 if self.concatattheend else 0), self.num_class))
self.point_transforms = nn.ModuleList([
nn.Sequential(
nn.Linear(cs[0], cs[4] * self.block.expansion),
nn.SyncBatchNorm(cs[4] * self.block.expansion),
nn.ReLU(True),
),
nn.Sequential(
nn.Linear(cs[4] * self.block.expansion, cs[6] * self.block.expansion),
nn.SyncBatchNorm(cs[6] * self.block.expansion),
nn.ReLU(True),
),
nn.Sequential(
nn.Linear(cs[6] * self.block.expansion, cs[8] * self.block.expansion),
nn.SyncBatchNorm(cs[8] * self.block.expansion),
nn.ReLU(True),
)
])
self.weight_initialization()
dropout_p = 0.0 #model_cfg.get('DROPOUT_P', 0.3)
self.dropout = nn.Dropout(dropout_p, True)
self.text_embeddings_path = self.config['text_embeddings_path']
text_categories = self.config['text_categories']
if self.text_embeddings_path is None:
self.text_embeddings = nn.Parameter(torch.zeros(text_categories, 512))
nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)
else:
self.register_buffer('text_embeddings', torch.randn(text_categories, 512))
loaded = torch.load(self.text_embeddings_path, map_location='cuda')
self.text_embeddings[:, :] = loaded[:, :]
self.text_embeddings = torch.cat((self.text_embeddings[0, :].unsqueeze(0)*0, self.text_embeddings), dim=0)
self.point_mapping_local = nn.Linear(480, 512)
self.point_mapping_global = nn.Linear(480, 512)
self.point_mapping_global_random = nn.Linear(480, 512)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, nn.SyncBatchNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# def forward(self, x):
def forward(self, batch_dict, return_logit=False, return_tta=False):
""", previous_memory=[None, None, None, None], previous_offset=None, return_memory=False):"""
x = batch_dict
z = PointTensor(x.F, x.C.float())
x0 = initial_voxelize(z, self.pres, self.vres)
x0 = self.stem(x0)
z0 = voxel_to_point(x0, z, nearest=False)
z0.F = z0.F
x1 = point_to_voxel(x0, z0)
x1 = self.stage1(x1)
x2 = self.stage2(x1)
x3 = self.stage3(x2)
x4 = self.stage4(x3)
z1 = voxel_to_point(x4, z0)
z1.F = z1.F + self.point_transforms[0](z0.F)
y1 = point_to_voxel(x4, z1)
y1.F = self.dropout(y1.F)
y1 = self.up1[0](y1)
y1 = torchsparse.cat([y1, x3])
y1 = self.up1[1](y1)
y2 = self.up2[0](y1)
y2 = torchsparse.cat([y2, x2])
y2 = self.up2[1](y2)
z2 = voxel_to_point(y2, z1)
z2.F = z2.F + self.point_transforms[1](z1.F)
y3 = point_to_voxel(y2, z2)
y3.F = self.dropout(y3.F)
y3 = self.up3[0](y3)
y3 = torchsparse.cat([y3, x1])
y3 = self.up3[1](y3)
y4 = self.up4[0](y3)
y4 = torchsparse.cat([y4, x0])
y4 = self.up4[1](y4)
z3 = voxel_to_point(y4, z2)
z3.F = z3.F + self.point_transforms[2](z2.F)
if self.multi_scale == 'concat':
feat = torch.cat([z1.F, z2.F, z3.F], dim=1)
if self.config['mode'] == 'pretrain':
point_local = self.point_mapping_local(feat)
point_global = self.point_mapping_global(feat)
return point_local, point_global
elif self.config['mode'] == 'finetune':
out = self.classifier(feat)
return out
elif self.config['mode'] == 'source_free':
feat = self.point_mapping_global(feat)
out = F1.conv1d(feat.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
return out
elif self.config['mode'] == 'zero_shot':
feat = self.point_mapping_global(feat)
out = F1.conv1d(feat.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
return out
elif self.multi_scale == 'sum':
out = self.classifier(self.l1(z1.F) + self.l2(z2.F) + z3.F)
elif self.multi_scale == 'se':
attn = torch.cat([z1.F, z2.F, z3.F], dim=1)
attn = self.pool(attn.permute(1, 0)).permute(1, 0)
attn = self.attn(attn)
out = self.classifier(torch.cat([z1.F, z2.F, z3.F], dim=1) * attn)
else:
out = self.classifier(z3.F)
return out
    def forward_ensemble(self, batch_dict):
        # forward() does not accept an `ensemble` keyword, so delegate to the
        # plain forward pass here.
        return self.forward(batch_dict)
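# A minimal construction sketch for SPVCNN (the config keys follow the usage
# above; the values are placeholders, not recommendations):
#
#   config = {
#       "mode": "finetune",            # or "pretrain" / "source_free" / "zero_shot"
#       "text_embeddings_path": None,  # path to CLIP text embeddings, or None
#       "text_categories": 16,
#   }
#   model = SPVCNN(in_channels=4, num_class=17, config=config)
#   # "pretrain" returns (point_local, point_global) 512-d projections,
#   # "finetune" returns per-point class logits, and "source_free"/"zero_shot"
#   # return similarities against the loaded text embeddings.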
| 18,958 | 36.691849 | 155 | py |
CLIP2Scene | CLIP2Scene-main/model/vit.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
import torch
import torch.nn as nn
from mmcv.cnn import build_norm_layer
from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
from mmcv.cnn.utils.weight_init import (constant_init, kaiming_init,
trunc_normal_)
from mmcv.runner import BaseModule, ModuleList, _load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.utils import _pair as to_2tuple
import torch.nn.functional as F
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmseg.ops import resize
from mmseg.utils import get_root_logger
from builder import BACKBONES
class AdaptivePadding(nn.Module):
"""Applies padding to input (if needed) so that input can get fully covered
by filter you specified. It support two modes "same" and "corner". The
"same" mode is same with "SAME" padding mode in TensorFlow, pad zero around
input. The "corner" mode would pad zero to bottom right.
Args:
kernel_size (int | tuple): Size of the kernel:
stride (int | tuple): Stride of the filter. Default: 1:
dilation (int | tuple): Spacing between kernel elements.
Default: 1.
padding (str): Support "same" and "corner", "corner" mode
would pad zero to bottom right, and "same" mode would
pad zero around input. Default: "corner".
Example:
>>> kernel_size = 16
>>> stride = 16
>>> dilation = 1
>>> input = torch.rand(1, 1, 15, 17)
>>> adap_pad = AdaptivePadding(
>>> kernel_size=kernel_size,
>>> stride=stride,
>>> dilation=dilation,
>>> padding="corner")
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
>>> input = torch.rand(1, 1, 16, 17)
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
"""
def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
super(AdaptivePadding, self).__init__()
assert padding in ('same', 'corner')
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
self.padding = padding
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
def get_pad_shape(self, input_shape):
input_h, input_w = input_shape
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.stride
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
pad_h = max((output_h - 1) * stride_h +
(kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)
pad_w = max((output_w - 1) * stride_w +
(kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)
return pad_h, pad_w
def forward(self, x):
pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
if pad_h > 0 or pad_w > 0:
if self.padding == 'corner':
x = F.pad(x, [0, pad_w, 0, pad_h])
elif self.padding == 'same':
x = F.pad(x, [
pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2
])
return x
class PatchEmbed(BaseModule):
"""Image to Patch Embedding.
We use a conv layer to implement PatchEmbed.
Args:
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
conv_type (str): The config dict for embedding
conv layer type selection. Default: "Conv2d".
kernel_size (int): The kernel_size of embedding conv. Default: 16.
stride (int, optional): The slide stride of embedding conv.
Default: None (Would be set as `kernel_size`).
padding (int | tuple | string ): The padding length of
embedding conv. When it is a string, it means the mode
of adaptive padding, support "same" and "corner" now.
Default: "corner".
dilation (int): The dilation rate of embedding conv. Default: 1.
bias (bool): Bias of embed conv. Default: True.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
input_size (int | tuple | None): The size of input, which will be
            used to calculate the output size. Only works when `dynamic_size`
            is False. Default: None.
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None.
"""
def __init__(self,
in_channels=3,
embed_dims=768,
conv_type='Conv2d',
kernel_size=16,
stride=None,
padding='corner',
dilation=1,
bias=True,
norm_cfg=None,
input_size=None,
init_cfg=None):
super(PatchEmbed, self).__init__(init_cfg=init_cfg)
self.embed_dims = embed_dims
if stride is None:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of conv
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.projection = build_conv_layer(
dict(type=conv_type),
in_channels=in_channels,
out_channels=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
else:
self.norm = None
if input_size:
input_size = to_2tuple(input_size)
# `init_out_size` would be used outside to
# calculate the num_patches
# when `use_abs_pos_embed` outside
self.init_input_size = input_size
if self.adap_padding:
pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)
input_h, input_w = input_size
input_h = input_h + pad_h
input_w = input_w + pad_w
input_size = (input_h, input_w)
# https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
h_out = (input_size[0] + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) // stride[0] + 1
w_out = (input_size[1] + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) // stride[1] + 1
self.init_out_size = (h_out, w_out)
else:
self.init_input_size = None
self.init_out_size = None
def forward(self, x):
"""
Args:
x (Tensor): Has shape (B, C, H, W). In most case, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, out_h * out_w, embed_dims)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_h, out_w).
"""
if self.adap_padding:
x = self.adap_padding(x)
x = self.projection(x)
out_size = (x.shape[2], x.shape[3])
x = x.flatten(2).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x, out_size
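# A minimal PatchEmbed sketch (numbers are illustrative): a 224x224 RGB image
# split into 16x16 patches yields 196 tokens of dimension 768.
#
#   embed = PatchEmbed(in_channels=3, embed_dims=768, kernel_size=16)
#   tokens, hw = embed(torch.rand(1, 3, 224, 224))
#   # tokens.shape == (1, 196, 768), hw == (14, 14)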
class TransformerEncoderLayer(BaseModule):
"""Implements one encoder layer in Vision Transformer.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
drop_rate (float): Probability of an element to be zeroed
after the feed forward layer. Default: 0.0.
attn_drop_rate (float): The drop out rate for attention layer.
Default: 0.0.
drop_path_rate (float): stochastic depth rate. Default 0.0.
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
qkv_bias (bool): enable bias for qkv if True. Default: True
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
batch_first (bool): Key, Query and Value are shape of
(batch, n, embed_dim)
or (n, batch, embed_dim). Default: True.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
num_fcs=2,
qkv_bias=True,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
batch_first=True):
super(TransformerEncoderLayer, self).__init__()
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, embed_dims, postfix=1)
self.add_module(self.norm1_name, norm1)
self.attn = MultiheadAttention(
embed_dims=embed_dims,
num_heads=num_heads,
attn_drop=attn_drop_rate,
proj_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
batch_first=batch_first,
bias=qkv_bias)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, embed_dims, postfix=2)
self.add_module(self.norm2_name, norm2)
self.ffn = FFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
num_fcs=num_fcs,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
act_cfg=act_cfg)
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x, return_qkv=False):
q, k, v = None, None, None
if return_qkv:
y = self.norm1(x)
y = F.linear(y, self.attn.attn.in_proj_weight, self.attn.attn.in_proj_bias)
N, L, C = y.shape
y = y.view(N, L, 3, C // 3).permute(2, 0, 1, 3).reshape(3 * N, L, C // 3)
y = F.linear(y, self.attn.attn.out_proj.weight, self.attn.attn.out_proj.bias)
q, k, v = y.tensor_split(3, dim=0)
v += x
v = self.ffn(self.norm2(v), identity=v)
x = self.attn(self.norm1(x), identity=x)
x = self.ffn(self.norm2(x), identity=x)
return x, q, k, v
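# Note on the return_qkv path above: q, k and v are re-derived from the
# pre-norm input with the attention block's own in/out projections, and the
# residual connection plus FFN are applied to the value branch as well.
# MaskCLIP-style decoding uses this value path as a dense, attention-free
# feature map, while `x` still follows the standard attention + FFN route.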
@BACKBONES.register_module()
class VisionTransformer(BaseModule):
"""Vision Transformer.
This backbone is the implementation of `An Image is Worth 16x16 Words:
Transformers for Image Recognition at
Scale <https://arxiv.org/abs/2010.11929>`_.
Args:
img_size (int | tuple): Input image size. Default: 224.
patch_size (int): The patch size. Default: 16.
in_channels (int): Number of input channels. Default: 3.
embed_dims (int): embedding dimension. Default: 768.
num_layers (int): depth of transformer. Default: 12.
num_heads (int): number of attention heads. Default: 12.
mlp_ratio (int): ratio of mlp hidden dim to embedding dim.
Default: 4.
out_indices (list | tuple | int): Output from which stages.
Default: -1.
qkv_bias (bool): enable bias for qkv if True. Default: True.
drop_rate (float): Probability of an element to be zeroed.
Default 0.0
attn_drop_rate (float): The drop out rate for attention layer.
Default 0.0
drop_path_rate (float): stochastic depth rate. Default 0.0
        with_cls_token (bool): Whether to concatenate the class token into the
            image tokens as transformer input. Default: True.
output_cls_token (bool): Whether output the cls_token. If set True,
`with_cls_token` must be True. Default: False.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN')
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
patch_norm (bool): Whether to add a norm in PatchEmbed Block.
Default: False.
        final_norm (bool): Whether to add an additional layer to normalize the
            final feature map. Default: False.
        interpolate_mode (str): Select the interpolation mode for resizing the
            position embedding vector. Default: bicubic.
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed. Default: False.
pretrained (str, optional): model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
img_size=224,
patch_size=16,
patch_bias=True,
in_channels=3,
embed_dims=768,
num_layers=12,
num_heads=12,
mlp_ratio=4,
out_indices=-1,
qkv_bias=True,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
with_cls_token=True,
output_cls_token=False,
norm_cfg=dict(type='LN'),
act_cfg=dict(type='GELU'),
patch_norm=False,
pre_norm=False,
final_norm=False,
return_qkv=False,
skip_last_attn=False,
interpolate_mode='bicubic',
num_fcs=2,
norm_eval=False,
with_cp=False,
pretrained=None,
init_cfg=None):
super(VisionTransformer, self).__init__(init_cfg=init_cfg)
if isinstance(img_size, int):
img_size = to_2tuple(img_size)
elif isinstance(img_size, tuple):
if len(img_size) == 1:
img_size = to_2tuple(img_size[0])
assert len(img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(img_size)}'
if output_cls_token:
assert with_cls_token is True, f'with_cls_token must be True if' \
f'set output_cls_token to True, but got {with_cls_token}'
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is not None:
raise TypeError('pretrained must be a str or None')
self.img_size = img_size
self.patch_size = patch_size
self.interpolate_mode = interpolate_mode
self.norm_eval = norm_eval
self.with_cp = with_cp
self.pretrained = pretrained
self.patch_embed = PatchEmbed(
in_channels=in_channels,
embed_dims=embed_dims,
conv_type='Conv2d',
kernel_size=patch_size,
stride=patch_size,
padding='corner',
bias=patch_bias,
norm_cfg=norm_cfg if patch_norm else None,
init_cfg=None,
)
num_patches = (img_size[0] // patch_size) * \
(img_size[1] // patch_size)
self.with_cls_token = with_cls_token
self.output_cls_token = output_cls_token
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims))
self.pos_embed = nn.Parameter(
torch.zeros(1, num_patches + 1, embed_dims))
self.drop_after_pos = nn.Dropout(p=drop_rate)
if isinstance(out_indices, int):
if out_indices == -1:
out_indices = num_layers - 1
self.out_indices = [out_indices]
elif isinstance(out_indices, list) or isinstance(out_indices, tuple):
self.out_indices = out_indices
else:
raise TypeError('out_indices must be type of int, list or tuple')
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, num_layers)
] # stochastic depth decay rule
self.layers = ModuleList()
for i in range(num_layers):
self.layers.append(
TransformerEncoderLayer(
embed_dims=embed_dims,
num_heads=num_heads,
feedforward_channels=mlp_ratio * embed_dims,
attn_drop_rate=attn_drop_rate,
drop_rate=drop_rate,
drop_path_rate=dpr[i],
num_fcs=num_fcs,
qkv_bias=qkv_bias,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
batch_first=True))
self.pre_norm = pre_norm
if pre_norm:
self.norm0_name, norm0 = build_norm_layer(
norm_cfg, embed_dims, postfix=0)
self.add_module(self.norm0_name, norm0)
self.final_norm = final_norm
if final_norm:
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, embed_dims, postfix=1)
self.add_module(self.norm1_name, norm1)
self.return_qkv = [False] * num_layers
if isinstance(return_qkv, bool):
for out_i in self.out_indices:
self.return_qkv[out_i] = return_qkv
elif isinstance(return_qkv, list) or isinstance(return_qkv, tuple):
for i, out_i in enumerate(self.out_indices):
self.return_qkv[out_i] = return_qkv[i]
else:
raise TypeError('return_qkv must be type of bool, list or tuple')
self.skip_last_attn = skip_last_attn
@property
def norm0(self):
return getattr(self, self.norm0_name)
@property
def norm1(self):
return getattr(self, self.norm1_name)
def init_weights(self):
if (isinstance(self.init_cfg, dict)
and self.init_cfg.get('type') == 'Pretrained'):
logger = get_root_logger()
checkpoint = _load_checkpoint(
self.init_cfg['checkpoint'], logger=logger, map_location='cpu')
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if 'pos_embed' in state_dict.keys():
if self.pos_embed.shape != state_dict['pos_embed'].shape:
logger.info(msg=f'Resize the pos_embed shape from '
f'{state_dict["pos_embed"].shape} to '
f'{self.pos_embed.shape}')
h, w = self.img_size
pos_size = int(
math.sqrt(state_dict['pos_embed'].shape[1] - 1))
state_dict['pos_embed'] = self.resize_pos_embed(
state_dict['pos_embed'],
(h // self.patch_size, w // self.patch_size),
(pos_size, pos_size), self.interpolate_mode)
print(self.load_state_dict(state_dict, False))
elif self.init_cfg is not None:
super(VisionTransformer, self).init_weights()
else:
# We only implement the 'jax_impl' initialization implemented at
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
for n, m in self.named_modules():
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
if 'ffn' in n:
nn.init.normal_(m.bias, mean=0., std=1e-6)
else:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
kaiming_init(m, mode='fan_in', bias=0.)
elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)):
constant_init(m, val=1.0, bias=0.)
def _pos_embeding(self, patched_img, hw_shape, pos_embed):
"""Positiong embeding method.
Resize the pos_embed, if the input image size doesn't match
the training size.
Args:
patched_img (torch.Tensor): The patched image, it should be
shape of [B, L1, C].
hw_shape (tuple): The downsampled image resolution.
pos_embed (torch.Tensor): The pos_embed weighs, it should be
shape of [B, L2, c].
Return:
torch.Tensor: The pos encoded image feature.
"""
assert patched_img.ndim == 3 and pos_embed.ndim == 3, \
'the shapes of patched_img and pos_embed must be [B, L, C]'
x_len, pos_len = patched_img.shape[1], pos_embed.shape[1]
if x_len != pos_len:
if pos_len == (self.img_size[0] // self.patch_size) * (
self.img_size[1] // self.patch_size) + 1:
pos_h = self.img_size[0] // self.patch_size
pos_w = self.img_size[1] // self.patch_size
else:
raise ValueError(
'Unexpected shape of pos_embed, got {}.'.format(
pos_embed.shape))
pos_embed = self.resize_pos_embed(pos_embed, hw_shape,
(pos_h, pos_w),
self.interpolate_mode)
return self.drop_after_pos(patched_img + pos_embed)
@staticmethod
    def resize_pos_embed(pos_embed, input_shape, pos_shape, mode):
"""Resize pos_embed weights.
        Resize pos_embed using the bicubic interpolation method.
        Args:
            pos_embed (torch.Tensor): Position embedding weights.
            input_shape (tuple): Tuple for (downsampled input image height,
                downsampled input image width).
            pos_shape (tuple): The resolution of the downsampled original
                training image.
            mode (str): Algorithm used for upsampling:
                ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
                ``'trilinear'``. Default: ``'nearest'``
Return:
torch.Tensor: The resized pos_embed of shape [B, L_new, C]
"""
assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
pos_h, pos_w = pos_shape
cls_token_weight = pos_embed[:, 0]
pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]
pos_embed_weight = pos_embed_weight.reshape(
1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2)
        pos_embed_weight = resize(
            pos_embed_weight, size=input_shape, align_corners=False, mode=mode)
cls_token_weight = cls_token_weight.unsqueeze(1)
pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2)
pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)
return pos_embed
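    # Shape sketch for resize_pos_embed (illustrative numbers): a checkpoint
    # trained at 224x224 with 16x16 patches stores pos_embed of shape
    # (1, 1 + 14 * 14, C); for a 320x480 input it is resized to
    # (1, 1 + 20 * 30, C), with the cls-token embedding kept unchanged.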
def forward(self, inputs):
B = inputs.shape[0]
x, hw_shape = self.patch_embed(inputs)
# stole cls_tokens impl from Phil Wang, thanks
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = self._pos_embeding(x, hw_shape, self.pos_embed)
if not self.with_cls_token:
# Remove class token for transformer encoder input
x = x[:, 1:]
if self.pre_norm:
x = self.norm0(x)
outs = []
for i, layer in enumerate(self.layers):
x, q, k, v = layer(x, self.return_qkv[i] \
or (i == len(self.layers) - 1 and self.skip_last_attn))
if i == len(self.layers) - 1:
if self.final_norm:
x = self.norm1(x)
if self.return_qkv[i]:
v = self.norm1(v)
if self.skip_last_attn:
if self.with_cls_token:
x[:, 1:] = v[:, 1:]
else:
x = v
if i in self.out_indices:
if self.with_cls_token:
# Remove class token and reshape token for decoder head
out = x[:, 1:]
else:
out = x
B, _, C = out.shape
out = out.reshape(B, hw_shape[0], hw_shape[1],
C).permute(0, 3, 1, 2).contiguous()
if self.output_cls_token:
out = [out, x[:, 0]]
if self.return_qkv[i]:
if self.with_cls_token:
q = q[:, 1:]
k = k[:, 1:]
v = v[:, 1:]
v = v.reshape(B, hw_shape[0], hw_shape[1],
C).permute(0, 3, 1, 2).contiguous()
out = [out, q, k, v]
outs.append(out)
return tuple(outs)
def train(self, mode=True):
super(VisionTransformer, self).train(mode)
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, nn.LayerNorm):
m.eval()
| 26,623 | 39.03609 | 128 | py |
CLIP2Scene | CLIP2Scene-main/model/minkunet.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# https://arxiv.org/abs/2007.10985
from model.resnet import ResNetBase, get_norm
from model.modules.common import ConvType, NormType, conv, conv_tr
from model.modules.resnet_block import BasicBlock, Bottleneck
from MinkowskiEngine import MinkowskiReLU
from MinkowskiEngine import SparseTensor
import MinkowskiEngine.MinkowskiOps as me
# import torchsparse.nn.functional as F
from torch.nn import functional as F
import torch
import torch.nn as nn
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
INIT_DIM = 32
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
# CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE # FOR ME0.5
def __init__(self, in_channels, out_channels, config, D=3):
self.normalize_feature = config["normalize_features"]
super(MinkUNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
dilations = self.DILATIONS
bn_momentum = config["bn_momentum"]
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
self.inplanes = self.INIT_DIM
self.conv0p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config["kernel_size"], 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.conv1p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.conv2p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.conv3p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.conv4p8s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.convtr4p16s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bntr4 = get_norm(
self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum
)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.convtr5p8s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bntr5 = get_norm(
self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum
)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.convtr6p4s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bntr6 = get_norm(
self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum
)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(
self.BLOCK,
self.PLANES[6],
self.LAYERS[6],
dilation=dilations[6],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.convtr7p2s2 = conv_tr(
self.inplanes,
self.PLANES[7],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D,
)
self.bntr7 = get_norm(
self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum
)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(
self.BLOCK,
self.PLANES[7],
self.LAYERS[7],
dilation=dilations[7],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum,
)
self.final = conv(
self.PLANES[7], 512, kernel_size=1, stride=1, bias=True, D=D
)
self.relu = MinkowskiReLU(inplace=True)
self.text_embeddings_path = self.config['text_embeddings_path']
text_categories = self.config['text_categories']
if self.text_embeddings_path is None:
self.text_embeddings = nn.Parameter(torch.zeros(text_categories, 512))
nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)
else:
self.register_buffer('text_embeddings', torch.randn(text_categories, 512))
loaded = torch.load(self.text_embeddings_path, map_location='cuda')
self.text_embeddings[:, :] = loaded[:, :]
self.text_embeddings = torch.cat((self.text_embeddings[0, :].unsqueeze(0)*0, self.text_embeddings), dim=0)
self.local_feature = conv(
self.PLANES[7], 512, kernel_size=1, stride=1, bias=True, D=D
)
self.classifier = conv(
self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D
)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
encoder_out = self.block4(out)
out = self.convtr4p16s2(encoder_out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p8)
out = self.block5(out)
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p4)
out = self.block6(out)
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p2)
out = self.block7(out)
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = me.cat(out, out_p1)
feats = self.block8(out)
# out = self.final(out)
if self.config['mode'] == 'pretrain':
out = self.final(feats)
local_feature = self.local_feature(feats)
return out.F, local_feature.F
elif self.config['mode'] == 'finetune':
out = self.classifier(feats)
return out.F
elif self.config['mode'] == 'source_free':
feat = self.final(feats)
out = F.conv1d(feat.F.unsqueeze(-1), self.text_embeddings[:, :, None]).squeeze()
return out
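# Depending on config["mode"], MinkUNetBase.forward returns different heads:
# "pretrain" -> (512-d projected features, 512-d local features) as dense
# feature matrices (out.F), "finetune" -> per-point class logits from
# self.classifier, and "source_free" -> similarities between the projected
# features and the loaded CLIP text embeddings (the 1x1 conv1d above).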
class MinkUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class MinkUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class MinkUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class MinkUNet14A(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet14B(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet14C(MinkUNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class MinkUNet14D(MinkUNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet18A(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet18B(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet18D(MinkUNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet34A(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class MinkUNet34B(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class MinkUNet34C(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96) | 11,742 | 28.804569 | 114 | py |
CLIP2Scene | CLIP2Scene-main/model/maskclip_model.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import print_log
from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
from mmcv.runner import BaseModule, ModuleList, _load_checkpoint
from torch.nn.modules.utils import _pair as to_2tuple
from mmseg.ops import resize
from mmseg.utils import get_root_logger
import math
import torch.nn.functional as F
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.utils import to_2tuple
import torch.nn as nn
import warnings
from collections import OrderedDict
import mmcv
import numpy as np
import torch
import re
def load_checkpoint1(model_load_path, model):
my_model_dict = model.state_dict()
pre_weight = torch.load(model_load_path, map_location='cpu')['state_dict']
revise_keys = [(r'^backbone\.', '')]
for p, r in revise_keys:
pre_weight = OrderedDict(
{re.sub(p, r, k): v
for k, v in pre_weight.items()})
part_load = {}
match_size = 0
nomatch_size = 0
for k in pre_weight.keys():
value = pre_weight[k]
if k in my_model_dict and my_model_dict[k].shape == value.shape:
match_size += 1
part_load[k] = value
else:
print("missed keys: ", k)
nomatch_size += 1
print("matched parameter sets: {}, and no matched: {}".format(match_size, nomatch_size))
my_model_dict.update(part_load)
model.load_state_dict(my_model_dict)
return model
class MaskClipHead(nn.Module):
def __init__(self,
text_embeddings_path='/mnt/lustre/chenrunnan/projects/MaskCLIP/pretrain/nuscenes_ViT16_clip_text.pth',
visual_projs_path='/mnt/lustre/chenrunnan/projects/MaskCLIP/pretrain/ViT16_clip_weights.pth',
channels=0,
num_classes=16,
in_channels=768,
dropout_ratio=0,
conv_cfg=None,
norm_cfg=dict(type='SyncBN', requires_grad=True),
act_cfg=dict(type='ReLU'),
in_index=-1,
input_transform=None,
ignore_index=255,
align_corners=False,
freeze=False,
text_categories=16,
text_channels=512,
vit=True,
ks_thresh=1,
pd_thresh=0.5,
attn_pooling=False,
num_heads=32,
**kwargs):
super(MaskClipHead, self).__init__(**kwargs)
self.in_channels = in_channels
self.input_transform = input_transform
self.channels = channels
self.num_classes = num_classes
self.dropout_ratio = dropout_ratio
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.in_index = in_index
self.ignore_index = ignore_index
self.align_corners = align_corners
if channels > 0:
self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1)
if dropout_ratio > 0:
self.dropout = nn.Dropout2d(dropout_ratio)
else:
self.dropout = None
self.fp16_enabled = False
self.freeze = freeze
self.text_categories = text_categories
self.text_channels = text_channels
self.text_embeddings_path = text_embeddings_path
self.visual_projs_path = visual_projs_path
if self.text_embeddings_path is None:
self.text_embeddings = nn.Parameter(torch.zeros(text_categories, text_channels))
nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)
else:
self.register_buffer('text_embeddings', torch.randn(text_categories, text_channels))
self.load_text_embeddings()
self.vit = vit
if vit:
self.proj = nn.Conv2d(self.in_channels, text_channels, 1, bias=False)
else:
self.q_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)
self.k_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)
self.v_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)
self.c_proj = nn.Conv2d(self.in_channels, text_channels, 1)
self.load_visual_projs()
self.ks_thresh = ks_thresh
self.pd_thresh = pd_thresh
self.attn_pooling = attn_pooling
self.num_heads = num_heads
self.image_mapping_local = nn.Conv2d(self.in_channels, 512, 1)
def load_text_embeddings(self):
loaded = torch.load(self.text_embeddings_path, map_location='cuda')
self.text_embeddings[:, :] = loaded[:, :]
print_log(f'Loaded text embeddings from {self.text_embeddings_path}', logger=get_root_logger())
def load_visual_projs(self):
loaded = torch.load(self.visual_projs_path, map_location='cuda')
attrs = ['proj'] if self.vit else ['q_proj', 'k_proj', 'v_proj', 'c_proj']
for attr in attrs:
current_attr = getattr(self, attr)
state_dict = loaded[attr]
for key in state_dict:
if 'weight' in key:
state_dict[key] = state_dict[key][:, :, None, None]
current_attr.load_state_dict(state_dict)
print("attrs", attrs)
print_log(f'Loaded proj weights from {self.visual_projs_path}', logger=get_root_logger())
def _init_inputs(self, in_channels, in_index, input_transform):
pass
def _transform_inputs(self, inputs):
pass
# def forward(self, inputs, img_metas, test_cfg):
def forward(self, inputs):
# x = self._transform_inputs(inputs)
x = inputs[self.in_index]
        q, k, v, cls_token = None, None, None, None
        image_local = None  # only populated on the ViT path when value features exist
if self.vit:
if isinstance(x, list) and len(x) == 4:
x, q, k, v = x
if isinstance(x, list) and len(x) == 2:
x, cls_token = x
if v is not None:
feat = self.proj(v)
image_local = self.image_mapping_local(v)
else:
feat = self.proj(x)
if cls_token is not None:
cls_token = self.proj(cls_token[:, :, None, None])[:, :, 0, 0]
else:
if self.attn_pooling:
N, C, H, W = x.shape
x = x.view(N, C, -1).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight[:, :, 0, 0],
k_proj_weight=self.k_proj.weight[:, :, 0, 0],
v_proj_weight=self.v_proj.weight[:, :, 0, 0],
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight[:, :, 0, 0],
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
feat = x[1:].permute(1, 2, 0).view(N, -1, H, W)
else:
q = self.q_proj(x)
k = self.k_proj(x)
q = torch.flatten(q, start_dim=2).transpose(-2, -1)
k = torch.flatten(k, start_dim=2).transpose(-2, -1)
v = self.v_proj(x)
feat = self.c_proj(v)
output = self.cls_seg(feat)
# if not self.training:
# output = self.refine_output(output, k)
return image_local, output
def cls_seg(self, feat):
feat = feat / feat.norm(dim=1, keepdim=True)
output = F.conv2d(feat, self.text_embeddings[:, :, None, None])
return output
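    # cls_seg acts as a dense zero-shot classifier: after L2-normalising the
    # pixel features along the channel axis, the 1x1 convolution with the text
    # embeddings computes, per pixel, a dot product against every category
    # embedding. A rough equivalent sketch:
    #
    #   feat = F.normalize(feat, dim=1)  # (N, C, H, W)
    #   logits = torch.einsum('nchw,kc->nkhw', feat, self.text_embeddings)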
def refine_output(self, output, k):
if self.pd_thresh > 0:
N, C, H, W = output.shape
_output = F.softmax(output * 100, dim=1)
max_cls_conf = _output.view(N, C, -1).max(dim=-1)[0]
selected_cls = (max_cls_conf < self.pd_thresh)[:, :, None, None].expand(N, C, H, W)
output[selected_cls] = -100
if k is not None and self.ks_thresh > 0:
output = F.softmax(output * 100, dim=1)
N, C, H, W = output.shape
output = output.view(N, C, -1).transpose(-2, -1)
# softmax
# weight = k @ k.transpose(-2, -1)
# weight = F.softmax(weight, dim=-1)
# L2 distance
k = F.normalize(k, p=2)
weight = k @ k.transpose(-2, -1)
selected_pos = (output.max(dim=-1, keepdim=True)[0] < self.ks_thresh)
selected_pos = selected_pos.expand(-1, -1, C)
weighted_output = weight @ output
output[selected_pos] = weighted_output[selected_pos]
output = output.transpose(-2, -1).view(N, C, H, W)
return output
class AdaptivePadding(nn.Module):
"""Applies padding to input (if needed) so that input can get fully covered
by filter you specified. It support two modes "same" and "corner". The
"same" mode is same with "SAME" padding mode in TensorFlow, pad zero around
input. The "corner" mode would pad zero to bottom right.
Args:
kernel_size (int | tuple): Size of the kernel:
stride (int | tuple): Stride of the filter. Default: 1:
dilation (int | tuple): Spacing between kernel elements.
Default: 1.
padding (str): Support "same" and "corner", "corner" mode
would pad zero to bottom right, and "same" mode would
pad zero around input. Default: "corner".
Example:
>>> kernel_size = 16
>>> stride = 16
>>> dilation = 1
>>> input = torch.rand(1, 1, 15, 17)
>>> adap_pad = AdaptivePadding(
>>> kernel_size=kernel_size,
>>> stride=stride,
>>> dilation=dilation,
>>> padding="corner")
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
>>> input = torch.rand(1, 1, 16, 17)
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
"""
def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
super(AdaptivePadding, self).__init__()
assert padding in ('same', 'corner')
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
self.padding = padding
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
def get_pad_shape(self, input_shape):
input_h, input_w = input_shape
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.stride
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
pad_h = max((output_h - 1) * stride_h +
(kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)
pad_w = max((output_w - 1) * stride_w +
(kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)
return pad_h, pad_w
def forward(self, x):
pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
if pad_h > 0 or pad_w > 0:
if self.padding == 'corner':
x = F.pad(x, [0, pad_w, 0, pad_h])
elif self.padding == 'same':
x = F.pad(x, [
pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2
])
return x
class PatchEmbed(nn.Module):
"""Image to Patch Embedding.
We use a conv layer to implement PatchEmbed.
Args:
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
conv_type (str): The config dict for embedding
conv layer type selection. Default: "Conv2d".
kernel_size (int): The kernel_size of embedding conv. Default: 16.
stride (int, optional): The slide stride of embedding conv.
Default: None (Would be set as `kernel_size`).
padding (int | tuple | string ): The padding length of
embedding conv. When it is a string, it means the mode
of adaptive padding, support "same" and "corner" now.
Default: "corner".
dilation (int): The dilation rate of embedding conv. Default: 1.
bias (bool): Bias of embed conv. Default: True.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
input_size (int | tuple | None): The size of input, which will be
            used to calculate the output size. Only works when `dynamic_size`
            is False. Default: None.
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None.
"""
def __init__(self,
in_channels=3,
embed_dims=768,
conv_type='Conv2d',
kernel_size=16,
stride=None,
padding='corner',
dilation=1,
bias=True,
norm_cfg=None,
input_size=None,
init_cfg=None):
super(PatchEmbed, self).__init__()
self.embed_dims = embed_dims
if stride is None:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of conv
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.projection = build_conv_layer(
dict(type=conv_type),
in_channels=in_channels,
out_channels=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
else:
self.norm = None
if input_size:
input_size = to_2tuple(input_size)
# `init_out_size` would be used outside to
# calculate the num_patches
# when `use_abs_pos_embed` outside
self.init_input_size = input_size
if self.adap_padding:
pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)
input_h, input_w = input_size
input_h = input_h + pad_h
input_w = input_w + pad_w
input_size = (input_h, input_w)
# https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
h_out = (input_size[0] + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) // stride[0] + 1
w_out = (input_size[1] + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) // stride[1] + 1
self.init_out_size = (h_out, w_out)
else:
self.init_input_size = None
self.init_out_size = None
def forward(self, x):
"""
Args:
x (Tensor): Has shape (B, C, H, W). In most case, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, out_h * out_w, embed_dims)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_h, out_w).
"""
if self.adap_padding:
x = self.adap_padding(x)
x = self.projection(x)
out_size = (x.shape[2], x.shape[3])
x = x.flatten(2).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x, out_size
class TransformerEncoderLayer(nn.Module):
"""Implements one encoder layer in Vision Transformer.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
drop_rate (float): Probability of an element to be zeroed
after the feed forward layer. Default: 0.0.
attn_drop_rate (float): The drop out rate for attention layer.
Default: 0.0.
drop_path_rate (float): stochastic depth rate. Default 0.0.
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
qkv_bias (bool): enable bias for qkv if True. Default: True
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
batch_first (bool): Key, Query and Value are shape of
(batch, n, embed_dim)
or (n, batch, embed_dim). Default: True.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
num_fcs=2,
qkv_bias=True,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
batch_first=True):
super(TransformerEncoderLayer, self).__init__()
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, embed_dims, postfix=1)
self.add_module(self.norm1_name, norm1)
self.attn = MultiheadAttention(
embed_dims=embed_dims,
num_heads=num_heads,
attn_drop=attn_drop_rate,
proj_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
batch_first=batch_first,
bias=qkv_bias)
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, embed_dims, postfix=2)
self.add_module(self.norm2_name, norm2)
self.ffn = FFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
num_fcs=num_fcs,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
act_cfg=act_cfg)
@property
def norm1(self):
return getattr(self, self.norm1_name)
@property
def norm2(self):
return getattr(self, self.norm2_name)
def forward(self, x, return_qkv=False):
q, k, v = None, None, None
if return_qkv:
y = self.norm1(x)
y = F.linear(y, self.attn.attn.in_proj_weight, self.attn.attn.in_proj_bias)
N, L, C = y.shape
y = y.view(N, L, 3, C//3).permute(2, 0, 1, 3).reshape(3*N, L, C//3)
y = F.linear(y, self.attn.attn.out_proj.weight, self.attn.attn.out_proj.bias)
# q, k, v = y.tensor_split(3, dim=0)
nn = y.shape[0]
q, k, v = y[:nn//3, :, :], y[nn//3:(nn//3) * 2, :, :], y[(nn//3) * 2:, :, :]
v += x
v = self.ffn(self.norm2(v), identity=v)
x = self.attn(self.norm1(x), identity=x)
x = self.ffn(self.norm2(x), identity=x)
return x, q, k, v
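# Illustrative sketch (added for clarity; operates on plain tensors rather than
# the mmcv attention module used above): the `return_qkv` branch recovers q, k
# and v by applying the fused in-projection manually and slicing the stacked
# result into three equal chunks along the batch dimension.
def _demo_split_qkv(y):
    # y: (3 * N, L, C) stacked as [q; k; v], mirroring `forward` above.
    n = y.shape[0]
    q, k, v = y[:n // 3], y[n // 3:2 * (n // 3)], y[2 * (n // 3):]
    return q, k, v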
# @BACKBONES.register_module()
class VisionTransformer(nn.Module):
"""Vision Transformer.
This backbone is the implementation of `An Image is Worth 16x16 Words:
Transformers for Image Recognition at
Scale <https://arxiv.org/abs/2010.11929>`_.
Args:
img_size (int | tuple): Input image size. Default: 224.
patch_size (int): The patch size. Default: 16.
in_channels (int): Number of input channels. Default: 3.
embed_dims (int): embedding dimension. Default: 768.
num_layers (int): depth of transformer. Default: 12.
num_heads (int): number of attention heads. Default: 12.
mlp_ratio (int): ratio of mlp hidden dim to embedding dim.
Default: 4.
out_indices (list | tuple | int): Output from which stages.
Default: -1.
qkv_bias (bool): enable bias for qkv if True. Default: True.
drop_rate (float): Probability of an element to be zeroed.
Default 0.0
attn_drop_rate (float): The drop out rate for attention layer.
Default 0.0
drop_path_rate (float): stochastic depth rate. Default 0.0
        with_cls_token (bool): Whether to concatenate the class token with the
            image tokens as transformer input. Default: True.
output_cls_token (bool): Whether output the cls_token. If set True,
`with_cls_token` must be True. Default: False.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN')
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
patch_norm (bool): Whether to add a norm in PatchEmbed Block.
Default: False.
        final_norm (bool): Whether to add an additional layer to normalize the
            final feature map. Default: False.
        interpolate_mode (str): Select the interpolation mode for resizing the
            position embedding vector. Default: bicubic.
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed. Default: False.
pretrained (str, optional): model pretrained path. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
img_size=(224, 224),
patch_size=16,
patch_bias=False,
in_channels=3,
embed_dims=768,
num_layers=12,
num_heads=12,
mlp_ratio=4,
out_indices=-1,
qkv_bias=True,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
with_cls_token=True,
output_cls_token=False,
norm_cfg=dict(type='LN', eps=1e-6),
act_cfg=dict(type='GELU'),
patch_norm=False,
pre_norm=True,
final_norm=True,
return_qkv=True,
skip_last_attn=False,
interpolate_mode='bicubic',
num_fcs=2,
norm_eval=False,
with_cp=False,
pretrained=None,
init_cfg=None):
super(VisionTransformer, self).__init__()
if isinstance(img_size, int):
img_size = to_2tuple(img_size)
elif isinstance(img_size, tuple):
if len(img_size) == 1:
img_size = to_2tuple(img_size[0])
assert len(img_size) == 2, \
f'The size of image should have length 1 or 2, ' \
f'but got {len(img_size)}'
if output_cls_token:
            assert with_cls_token is True, f'with_cls_token must be True if ' \
                f'output_cls_token is set to True, but got {with_cls_token}'
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is not None:
raise TypeError('pretrained must be a str or None')
self.img_size = img_size
self.patch_size = patch_size
self.interpolate_mode = interpolate_mode
self.norm_eval = norm_eval
self.with_cp = with_cp
self.pretrained = pretrained
self.patch_embed = PatchEmbed(
in_channels=in_channels,
embed_dims=embed_dims,
conv_type='Conv2d',
kernel_size=patch_size,
stride=patch_size,
padding='corner',
bias=patch_bias,
norm_cfg=norm_cfg if patch_norm else None,
init_cfg=None,
)
num_patches = (img_size[0] // patch_size) * \
(img_size[1] // patch_size)
self.with_cls_token = with_cls_token
self.output_cls_token = output_cls_token
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims))
self.pos_embed = nn.Parameter(
torch.zeros(1, num_patches + 1, embed_dims))
self.drop_after_pos = nn.Dropout(p=drop_rate)
if isinstance(out_indices, int):
if out_indices == -1:
out_indices = num_layers - 1
self.out_indices = [out_indices]
elif isinstance(out_indices, list) or isinstance(out_indices, tuple):
self.out_indices = out_indices
else:
raise TypeError('out_indices must be type of int, list or tuple')
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, num_layers)
] # stochastic depth decay rule
self.layers = ModuleList()
for i in range(num_layers):
self.layers.append(
TransformerEncoderLayer(
embed_dims=embed_dims,
num_heads=num_heads,
feedforward_channels=mlp_ratio * embed_dims,
attn_drop_rate=attn_drop_rate,
drop_rate=drop_rate,
drop_path_rate=dpr[i],
num_fcs=num_fcs,
qkv_bias=qkv_bias,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
batch_first=True))
self.pre_norm = pre_norm
if pre_norm:
self.norm0_name, norm0 = build_norm_layer(
norm_cfg, embed_dims, postfix=0)
self.add_module(self.norm0_name, norm0)
self.final_norm = final_norm
if final_norm:
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, embed_dims, postfix=1)
self.add_module(self.norm1_name, norm1)
self.return_qkv = [False] * num_layers
if isinstance(return_qkv, bool):
for out_i in self.out_indices:
self.return_qkv[out_i] = return_qkv
elif isinstance(return_qkv, list) or isinstance(return_qkv, tuple):
for i, out_i in enumerate(self.out_indices):
self.return_qkv[out_i] = return_qkv[i]
else:
raise TypeError('return_qkv must be type of bool, list or tuple')
self.skip_last_attn = skip_last_attn
@property
def norm0(self):
return getattr(self, self.norm0_name)
@property
def norm1(self):
return getattr(self, self.norm1_name)
def _pos_embeding(self, patched_img, hw_shape, pos_embed):
"""Positiong embeding method.
Resize the pos_embed, if the input image size doesn't match
the training size.
Args:
patched_img (torch.Tensor): The patched image, it should be
shape of [B, L1, C].
hw_shape (tuple): The downsampled image resolution.
pos_embed (torch.Tensor): The pos_embed weighs, it should be
shape of [B, L2, c].
Return:
torch.Tensor: The pos encoded image feature.
"""
assert patched_img.ndim == 3 and pos_embed.ndim == 3, \
'the shapes of patched_img and pos_embed must be [B, L, C]'
x_len, pos_len = patched_img.shape[1], pos_embed.shape[1]
if x_len != pos_len:
if pos_len == (self.img_size[0] // self.patch_size) * (
self.img_size[1] // self.patch_size) + 1:
pos_h = self.img_size[0] // self.patch_size
pos_w = self.img_size[1] // self.patch_size
else:
raise ValueError(
'Unexpected shape of pos_embed, got {}.'.format(
pos_embed.shape))
pos_embed = self.resize_pos_embed(pos_embed, hw_shape,
(pos_h, pos_w),
self.interpolate_mode)
return self.drop_after_pos(patched_img + pos_embed)
@staticmethod
    def resize_pos_embed(pos_embed, input_shape, pos_shape, mode):
"""Resize pos_embed weights.
Resize pos_embed using bicubic interpolate method.
Args:
pos_embed (torch.Tensor): Position embedding weights.
input_shpae (tuple): Tuple for (downsampled input image height,
downsampled input image width).
pos_shape (tuple): The resolution of downsampled origin training
image.
mode (str): Algorithm used for upsampling:
``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |
``'trilinear'``. Default: ``'nearest'``
Return:
torch.Tensor: The resized pos_embed of shape [B, L_new, C]
"""
assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'
pos_h, pos_w = pos_shape
cls_token_weight = pos_embed[:, 0]
pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]
pos_embed_weight = pos_embed_weight.reshape(
1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2)
        pos_embed_weight = resize(
            pos_embed_weight, size=input_shape, align_corners=False, mode=mode)
cls_token_weight = cls_token_weight.unsqueeze(1)
pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2)
pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)
return pos_embed
def forward(self, inputs):
B = inputs.shape[0]
x, hw_shape = self.patch_embed(inputs)
# stole cls_tokens impl from Phil Wang, thanks
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x = self._pos_embeding(x, hw_shape, self.pos_embed)
if not self.with_cls_token:
# Remove class token for transformer encoder input
x = x[:, 1:]
if self.pre_norm:
x = self.norm0(x)
outs = []
for i, layer in enumerate(self.layers):
x, q, k, v = layer(x, self.return_qkv[i] \
or (i==len(self.layers)-1 and self.skip_last_attn))
if i == len(self.layers) - 1:
if self.final_norm:
x = self.norm1(x)
if self.return_qkv[i]:
v = self.norm1(v)
if self.skip_last_attn:
if self.with_cls_token:
x[:, 1:] = v[:, 1:]
else:
x = v
if i in self.out_indices:
if self.with_cls_token:
# Remove class token and reshape token for decoder head
out = x[:, 1:]
else:
out = x
B, _, C = out.shape
out = out.reshape(B, hw_shape[0], hw_shape[1],
C).permute(0, 3, 1, 2).contiguous()
if self.output_cls_token:
out = [out, x[:, 0]]
if self.return_qkv[i]:
if self.with_cls_token:
q = q[:, 1:]
k = k[:, 1:]
v = v[:, 1:]
v = v.reshape(B, hw_shape[0], hw_shape[1],
C).permute(0, 3, 1, 2).contiguous()
out = [out, q, k, v]
outs.append(out)
return tuple(outs)
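# Illustrative usage sketch (an assumption about intended use; requires the
# mmcv-style layers imported earlier in this file and runs with random weights):
def _demo_vision_transformer():
    vit = VisionTransformer()  # defaults: 224x224 input, 16x16 patches, 12 layers
    outs = vit(torch.randn(1, 3, 224, 224))
    out, q, k, v = outs[-1]  # default out_indices=-1 with return_qkv=True
    return out.shape, v.shape  # both (1, 768, 14, 14); q and k are (1, 196, 768)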
class maskClipFeatureExtractor(nn.Module):
"""Encoder Decoder segmentors.
EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.
Note that auxiliary_head is only used for deep supervision during training,
which could be dumped during inference.
"""
def __init__(self,
config,
test_cfg=dict(mode='whole'),
img_size=(224, 416),
preprocessing=None):
super(maskClipFeatureExtractor, self).__init__()
self.encoder = VisionTransformer()
self.decoder = MaskClipHead(text_embeddings_path=config['text_embeddings_path'],
visual_projs_path=config['visual_projs_path'],
text_categories=config['text_categories'])
self.align_corners = self.decoder.align_corners
self.num_classes = self.decoder.num_classes
self.test_cfg = test_cfg
self.checkpoint = config['maskclip_checkpoint']
self.encoder = load_checkpoint1(self.checkpoint, self.encoder)
self.img_size = img_size
for param in self.encoder.parameters():
param.requires_grad = False
for param in self.decoder.parameters():
param.requires_grad = False
# @auto_fp16(apply_to=('img', ))
# def forward(self, img, img_metas, return_loss=True, **kwargs):
def forward(self, img):
x = self.encoder(img)
feat, out = self.decoder(x)
feat = resize(
input=feat,
size=self.img_size,
mode='bilinear',
align_corners=True)
feat = F.normalize(feat, p=2, dim=1)
out = resize(
input=out,
size=self.img_size,
mode='bilinear',
align_corners=self.align_corners)
seg_pred = out.argmax(dim=1)
return feat, seg_pred
def show_result(self,
img,
result,
palette=None,
classes=None,
win_name='',
show=False,
wait_time=0,
out_file=None,
opacity=0.5,
gt=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (Tensor): The semantic segmentation results to draw over
`img`.
            palette (list[list[int]] | np.ndarray | None): The palette of
                the segmentation map. If None is given, a random palette will
                be generated. Default: None
win_name (str): The window name.
wait_time (int): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
            opacity (float): Opacity of the painted segmentation map.
                Default 0.5.
                Must be in the (0, 1] range.
Returns:
            img (Tensor): The drawn image; only returned if neither `show` nor `out_file` is set.
"""
img = mmcv.imread(img)
img = img.copy()
seg = result[0]
if classes is not None:
self.CLASSES = classes
if palette is None:
if self.PALETTE is None:
# Get random state before set seed,
# and restore random state later.
# It will prevent loss of randomness, as the palette
# may be different in each iteration if not specified.
# See: https://github.com/open-mmlab/mmdetection/issues/5844
state = np.random.get_state()
np.random.seed(42)
# random palette
palette = np.random.randint(
0, 255, size=(len(self.CLASSES), 3))
np.random.set_state(state)
else:
palette = self.PALETTE
palette = np.array(palette)
assert palette.shape[0] == len(self.CLASSES), '({}) vs. ({})'.format(palette.shape[0], len(self.CLASSES))
assert palette.shape[1] == 3
assert len(palette.shape) == 2
assert 0 < opacity <= 1.0
color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
for label, color in enumerate(palette):
color_seg[seg == label, :] = color
# convert to BGR
color_seg = color_seg[..., ::-1]
img = img * (1 - opacity) + color_seg * opacity
if gt is not None:
# set the ignored area to black
img[gt == 255, :] = np.array([0, 0, 0])
img = img.astype(np.uint8)
# if out_file specified, do not show image in window
if out_file is not None:
show = False
if show:
mmcv.imshow(img, win_name, wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
if not (show or out_file):
warnings.warn('show==False and out_file is not specified, only '
'result image will be returned')
return img
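# Illustrative usage sketch (an assumption about intended use; every path below
# is a hypothetical placeholder and must point to real MaskCLIP assets on disk):
def _demo_feature_extractor():
    config = {
        'text_embeddings_path': '/path/to/text_embeddings.pth',
        'visual_projs_path': '/path/to/visual_projs.pth',
        'text_categories': 20,
        'maskclip_checkpoint': '/path/to/maskclip_checkpoint.pth',
    }
    extractor = maskClipFeatureExtractor(config, img_size=(224, 416))
    feat, seg_pred = extractor(torch.randn(1, 3, 224, 416))
    # feat: L2-normalised dense features; seg_pred: per-pixel argmax class ids.
    return feat.shape, seg_pred.shape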
| 37,830 | 37.603061 | 119 | py |
CLIP2Scene | CLIP2Scene-main/model/modules/resnet_encoder.py | import torch.nn as nn
from torchvision.models.resnet import ResNet
from torchvision.models.resnet import BasicBlock
from torchvision.models.resnet import Bottleneck
class ResNetEncoder(ResNet):
def __init__(self, **kwargs):
super().__init__(**kwargs)
del self.fc
del self.avgpool
def get_stages(self):
return [
nn.Identity(),
nn.Sequential(self.conv1, self.bn1, self.relu),
nn.Sequential(self.maxpool, self.layer1),
self.layer2,
self.layer3,
self.layer4,
]
def forward(self, x):
stages = self.get_stages()
features = []
for i in range(6):
x = stages[i](x)
features.append(x)
return features[5]
def load_state_dict(self, state_dict, **kwargs):
state_dict.pop("fc.bias", None)
state_dict.pop("fc.weight", None)
super().load_state_dict(state_dict, **kwargs)
resnet_encoders = {
"resnet18": {
"encoder": ResNetEncoder,
"params": {
"block": BasicBlock,
"layers": [2, 2, 2, 2],
},
},
"resnet50": {
"encoder": ResNetEncoder,
"params": {
"block": Bottleneck,
"layers": [3, 4, 6, 3],
},
},
}
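# Illustrative usage sketch (an assumption about intended use, not part of the
# original repo): build an encoder from the registry above and run a dummy batch.
def _demo_build_encoder(name="resnet18"):
    import torch
    entry = resnet_encoders[name]
    encoder = entry["encoder"](**entry["params"])
    features = encoder(torch.randn(1, 3, 224, 224))
    return features.shape  # torch.Size([1, 512, 7, 7]) for resnet18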
| 1,314 | 22.070175 | 59 | py |
CLIP2Scene | CLIP2Scene-main/model/modules/resnet_block.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from model.modules.common import ConvType, NormType, get_norm, conv
from MinkowskiEngine import MinkowskiReLU
class BasicBlockBase(nn.Module):
expansion = 1
NORM_TYPE = NormType.BATCH_NORM
def __init__(
self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3,
):
super(BasicBlockBase, self).__init__()
self.conv1 = conv(
inplanes,
planes,
kernel_size=3,
stride=stride,
dilation=dilation,
conv_type=conv_type,
D=D,
)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=1,
dilation=dilation,
bias=False,
conv_type=conv_type,
D=D,
)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BasicBlock(BasicBlockBase):
NORM_TYPE = NormType.BATCH_NORM
class BottleneckBase(nn.Module):
expansion = 4
NORM_TYPE = NormType.BATCH_NORM
def __init__(
self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
conv_type=ConvType.HYPERCUBE,
bn_momentum=0.1,
D=3,
):
super(BottleneckBase, self).__init__()
self.conv1 = conv(inplanes, planes, kernel_size=1, D=D)
self.norm1 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv2 = conv(
planes,
planes,
kernel_size=3,
stride=stride,
dilation=dilation,
conv_type=conv_type,
D=D,
)
self.norm2 = get_norm(self.NORM_TYPE, planes, D, bn_momentum=bn_momentum)
self.conv3 = conv(planes, planes * self.expansion, kernel_size=1, D=D)
self.norm3 = get_norm(
self.NORM_TYPE, planes * self.expansion, D, bn_momentum=bn_momentum
)
self.relu = MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(BottleneckBase):
NORM_TYPE = NormType.BATCH_NORM
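# Illustrative sketch (assumes MinkowskiEngine and model.modules.common are
# installed/importable, as the imports above require): a residual block with
# matching input/output planes so the identity shortcut applies; at run time it
# operates on sparse tensors.
def _demo_basic_block(channels=32):
    return BasicBlock(inplanes=channels, planes=channels, D=3)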
| 3,375 | 23.114286 | 81 | py |
CLIP2Scene | CLIP2Scene-main/model/modules/dino/vision_transformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import os
import math
import warnings  # needed by _no_grad_trunc_normal_ below
from functools import partial
import torch
import torch.nn as nn
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
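# Illustrative sketch (added for clarity): during training each sample's residual
# branch is dropped with probability `drop_prob`, and survivors are rescaled by
# 1 / keep_prob so the expected value of the output matches the input.
def _demo_drop_path(drop_prob=0.2):
    x = torch.ones(8, 4)
    out = drop_path(x, drop_prob=drop_prob, training=True)
    return out  # each row is either all zeros or all 1 / (1 - drop_prob)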
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
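# Illustrative sketch (hypothetical sizes): the module returns both the output
# tokens and the attention map, which `get_last_selfattention` below relies on.
def _demo_attention():
    attn_layer = Attention(dim=64, num_heads=8)
    x = torch.randn(2, 10, 64)  # (batch, tokens, dim)
    out, attn = attn_layer(x)
    return out.shape, attn.shape  # (2, 10, 64) and (2, 8, 10, 10)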
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x).flatten(2).transpose(1, 2)
return x
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x, all=False):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
if all:
return x
else:
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
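# Illustrative usage sketch (hypothetical sizes; weights are random unless
# load_pretrained_weights below is called):
def _demo_vit_small():
    model = vit_small(patch_size=16)
    x = torch.randn(2, 3, 224, 224)
    cls_feat = model(x)                     # (2, 384) [CLS] embedding
    tokens = model(x, all=True)             # (2, 197, 384) all tokens
    attn = model.get_last_selfattention(x)  # (2, 6, 197, 197)
    return cls_feat.shape, tokens.shape, attn.shape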
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
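# Illustrative sketch (hypothetical sizes): the projection head maps backbone
# embeddings to `out_dim` logits through a bottleneck and a weight-normalised,
# L2-normalised last layer.
def _demo_dino_head():
    head = DINOHead(in_dim=384, out_dim=1024)
    logits = head(torch.randn(4, 384))
    return logits.shape  # torch.Size([4, 1024])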
def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size):
if os.path.isfile(pretrained_weights):
state_dict = torch.load(pretrained_weights, map_location="cpu")
if checkpoint_key is not None and checkpoint_key in state_dict:
print(f"Take key {checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
else:
print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.")
url = None
if model_name == "vit_small" and patch_size == 16:
url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
elif model_name == "vit_small" and patch_size == 8:
url = "dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth"
elif model_name == "vit_base" and patch_size == 16:
url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
elif model_name == "vit_base" and patch_size == 8:
url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
elif model_name == "xcit_small_12_p16":
url = "dino_xcit_small_12_p16_pretrain/dino_xcit_small_12_p16_pretrain.pth"
elif model_name == "xcit_small_12_p8":
url = "dino_xcit_small_12_p8_pretrain/dino_xcit_small_12_p8_pretrain.pth"
elif model_name == "xcit_medium_24_p16":
url = "dino_xcit_medium_24_p16_pretrain/dino_xcit_medium_24_p16_pretrain.pth"
elif model_name == "xcit_medium_24_p8":
url = "dino_xcit_medium_24_p8_pretrain/dino_xcit_medium_24_p8_pretrain.pth"
elif model_name == "resnet50":
url = "dino_resnet50_pretrain/dino_resnet50_pretrain.pth"
if url is not None:
print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.")
state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)
model.load_state_dict(state_dict, strict=True)
else:
print("There is no reference weights available for this model => We use random weights.")
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
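# Illustrative sketch (added for clarity): draw values from a normal distribution
# truncated to [a, b] via the inverse-CDF transform implemented above.
def _demo_trunc_normal():
    w = torch.empty(1000)
    trunc_normal_(w, mean=0., std=1., a=-2., b=2.)
    return float(w.min()), float(w.max())  # both lie within [-2, 2]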
| 15,379 | 40.013333 | 124 | py |
NSVF | NSVF-main/setup.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
import glob
# build clib
# _ext_src_root = "fairnr/clib"
import os
_ext_src_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fairnr/clib")
_ext_sources = glob.glob("{}/src/*.cpp".format(_ext_src_root)) + glob.glob(
"{}/src/*.cu".format(_ext_src_root)
)
_ext_headers = glob.glob("{}/include/*".format(_ext_src_root))
setup(
name='fairnr',
ext_modules=[
CUDAExtension(
name='fairnr.clib._ext',
sources=_ext_sources,
extra_compile_args={
"cxx": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
"nvcc": ["-O2", "-I{}".format("{}/include".format(_ext_src_root))],
},
)
],
cmdclass={
'build_ext': BuildExtension
},
entry_points={
'console_scripts': [
'fairnr-render = fairnr_cli.render:cli_main',
'fairnr-train = fairseq_cli.train:cli_main'
],
},
)
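# Illustrative usage note (an assumption, not part of the original file): the CUDA
# extension and console scripts are typically built with a standard editable
# install, e.g. `pip install --editable .`, or compiled in place with
# `python setup.py build_ext --inplace`; both require a working CUDA toolchain.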
| 1,224 | 28.878049 | 87 | py |
NSVF | NSVF-main/fairnr/renderer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file simulates the "generator" used in fairseq
"""
import os, tempfile, shutil, glob
import time
import torch
import numpy as np
import logging
import imageio
from torchvision.utils import save_image
from fairnr.data import trajectory, geometry, data_utils
from fairseq.meters import StopwatchMeter
from fairnr.data.data_utils import recover_image, get_uv, parse_views
from pathlib import Path
logger = logging.getLogger(__name__)
class NeuralRenderer(object):
def __init__(self,
resolution="512x512",
frames=501,
speed=5,
raymarching_steps=None,
path_gen=None,
beam=10,
at=(0,0,0),
up=(0,1,0),
output_dir=None,
output_type=None,
fps=24,
test_camera_poses=None,
test_camera_intrinsics=None,
test_camera_views=None):
self.frames = frames
self.speed = speed
self.raymarching_steps = raymarching_steps
self.path_gen = path_gen
if isinstance(resolution, str):
self.resolution = [int(r) for r in resolution.split('x')]
else:
self.resolution = [resolution, resolution]
self.beam = beam
self.output_dir = output_dir
self.output_type = output_type
self.at = at
self.up = up
self.fps = fps
if self.path_gen is None:
self.path_gen = trajectory.circle()
if self.output_type is None:
self.output_type = ["rgb"]
if test_camera_intrinsics is not None:
self.test_int = data_utils.load_intrinsics(test_camera_intrinsics)
else:
self.test_int = None
self.test_frameids = None
if test_camera_poses is not None:
if os.path.isdir(test_camera_poses):
self.test_poses = [
np.loadtxt(f)[None, :, :] for f in sorted(glob.glob(test_camera_poses + "/*.txt"))]
self.test_poses = np.concatenate(self.test_poses, 0)
else:
self.test_poses = data_utils.load_matrix(test_camera_poses)
if self.test_poses.shape[1] == 17:
self.test_frameids = self.test_poses[:, -1].astype(np.int32)
self.test_poses = self.test_poses[:, :-1]
self.test_poses = self.test_poses.reshape(-1, 4, 4)
if test_camera_views is not None:
render_views = parse_views(test_camera_views)
self.test_poses = np.stack([self.test_poses[r] for r in render_views])
else:
self.test_poses = None
def generate_rays(self, t, intrinsics, img_size, inv_RT=None, action='none'):
if inv_RT is None:
cam_pos = torch.tensor(self.path_gen(t * self.speed / 180 * np.pi),
device=intrinsics.device, dtype=intrinsics.dtype)
cam_rot = geometry.look_at_rotation(cam_pos, at=self.at, up=self.up, inverse=True, cv=True)
inv_RT = cam_pos.new_zeros(4, 4)
inv_RT[:3, :3] = cam_rot
inv_RT[:3, 3] = cam_pos
inv_RT[3, 3] = 1
else:
inv_RT = torch.from_numpy(inv_RT).type_as(intrinsics)
h, w, rh, rw = img_size[0], img_size[1], img_size[2], img_size[3]
if self.test_int is not None:
uv = torch.from_numpy(get_uv(h, w, h, w)[0]).type_as(intrinsics)
intrinsics = self.test_int
else:
uv = torch.from_numpy(get_uv(h * rh, w * rw, h, w)[0]).type_as(intrinsics)
uv = uv.reshape(2, -1)
return uv, inv_RT
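    # Note (added for clarity): when `inv_RT` is None, the camera is placed on the
    # trajectory produced by `self.path_gen`, oriented with a look-at rotation
    # towards `self.at`, and rays are cast through the pixel grid from `get_uv`;
    # otherwise the provided 4x4 inverse extrinsics are used as-is.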
    def parse_sample(self, sample):
if len(sample) == 1:
return sample[0], 0, self.frames
elif len(sample) == 2:
return sample[0], sample[1], self.frames
elif len(sample) == 3:
return sample[0], sample[1], sample[2]
else:
raise NotImplementedError
@torch.no_grad()
def generate(self, models, sample, **kwargs):
model = models[0]
model.eval()
logger.info("rendering starts. {}".format(model.text))
output_path = self.output_dir
image_names = []
sample, step, frames = self.parse_sample(sample)
# fix the rendering size
a = sample['size'][0,0,0] / self.resolution[0]
b = sample['size'][0,0,1] / self.resolution[1]
sample['size'][:, :, 0] /= a
sample['size'][:, :, 1] /= b
sample['size'][:, :, 2] *= a
sample['size'][:, :, 3] *= b
for shape in range(sample['shape'].size(0)):
max_step = step + frames
while step < max_step:
next_step = min(step + self.beam, max_step)
uv, inv_RT = zip(*[
self.generate_rays(
k,
sample['intrinsics'][shape],
sample['size'][shape, 0],
self.test_poses[k] if self.test_poses is not None else None)
for k in range(step, next_step)
])
if self.test_frameids is not None:
assert next_step - step == 1
ids = torch.tensor(self.test_frameids[step: next_step]).type_as(sample['id'])
else:
ids = sample['id'][shape:shape+1]
real_images = sample['full_rgb'] if 'full_rgb' in sample else sample['colors']
real_images = real_images.transpose(2, 3) if real_images.size(-1) != 3 else real_images
_sample = {
'id': ids,
'colors': torch.cat([real_images[shape:shape+1] for _ in range(step, next_step)], 1),
'intrinsics': sample['intrinsics'][shape:shape+1],
'extrinsics': torch.stack(inv_RT, 0).unsqueeze(0),
'uv': torch.stack(uv, 0).unsqueeze(0),
'shape': sample['shape'][shape:shape+1],
'view': torch.arange(
step, next_step,
device=sample['shape'].device).unsqueeze(0),
'size': torch.cat([sample['size'][shape:shape+1] for _ in range(step, next_step)], 1),
'step': step
}
with data_utils.GPUTimer() as timer:
outs = model(**_sample)
logger.info("rendering frame={}\ttotal time={:.4f}".format(step, timer.sum))
for k in range(step, next_step):
images = model.visualize(_sample, None, 0, k-step)
image_name = "{:04d}".format(k)
for key in images:
name, type = key.split('/')[0].split('_')
if type in self.output_type:
if name == 'coarse':
type = 'coarse-' + type
if name == 'target':
continue
prefix = os.path.join(output_path, type)
Path(prefix).mkdir(parents=True, exist_ok=True)
if type == 'point':
data_utils.save_point_cloud(
os.path.join(prefix, image_name + '.ply'),
images[key][:, :3].cpu().numpy(),
(images[key][:, 3:] * 255).cpu().int().numpy())
# from fairseq import pdb; pdb.set_trace()
else:
image = images[key].permute(2, 0, 1) \
if images[key].dim() == 3 else torch.stack(3*[images[key]], 0)
save_image(image, os.path.join(prefix, image_name + '.png'), format=None)
image_names.append(os.path.join(prefix, image_name + '.png'))
# save pose matrix
prefix = os.path.join(output_path, 'pose')
Path(prefix).mkdir(parents=True, exist_ok=True)
pose = self.test_poses[k] if self.test_poses is not None else inv_RT[k-step].cpu().numpy()
np.savetxt(os.path.join(prefix, image_name + '.txt'), pose)
step = next_step
logger.info("done")
return step, image_names
def save_images(self, output_files, steps=None, combine_output=True):
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
timestamp = time.strftime('%Y-%m-%d.%H-%M-%S',time.localtime(time.time()))
if steps is not None:
timestamp = "step_{}.".format(steps) + timestamp
if not combine_output:
for type in self.output_type:
images = [imageio.imread(file_path) for file_path in output_files if type in file_path]
# imageio.mimsave('{}/{}_{}.gif'.format(self.output_dir, type, timestamp), images, fps=self.fps)
imageio.mimwrite('{}/{}_{}.mp4'.format(self.output_dir, type, timestamp), images, fps=self.fps, quality=8)
else:
images = [[imageio.imread(file_path) for file_path in output_files if type == file_path.split('/')[-2]] for type in self.output_type]
images = [np.concatenate([images[j][i] for j in range(len(images))], 1) for i in range(len(images[0]))]
imageio.mimwrite('{}/{}_{}.mp4'.format(self.output_dir, 'full', timestamp), images, fps=self.fps, quality=8)
return timestamp
def merge_videos(self, timestamps):
logger.info("mergining mp4 files..")
timestamp = time.strftime('%Y-%m-%d.%H-%M-%S',time.localtime(time.time()))
writer = imageio.get_writer(
os.path.join(self.output_dir, 'full_' + timestamp + '.mp4'), fps=self.fps)
for timestamp in timestamps:
tempfile = os.path.join(self.output_dir, 'full_' + timestamp + '.mp4')
reader = imageio.get_reader(tempfile)
for im in reader:
writer.append_data(im)
writer.close()
for timestamp in timestamps:
tempfile = os.path.join(self.output_dir, 'full_' + timestamp + '.mp4')
os.remove(tempfile) | 10,725 | 41.904 | 145 | py |
NSVF | NSVF-main/fairnr/options.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
import torch
from fairseq import options
def parse_args_and_arch(*args, **kwargs):
return options.parse_args_and_arch(*args, **kwargs)
def get_rendering_parser(default_task="single_object_rendering"):
parser = options.get_parser("Rendering", default_task)
options.add_dataset_args(parser, gen=True)
add_rendering_args(parser)
return parser
def add_rendering_args(parser):
group = parser.add_argument_group("Rendering")
options.add_common_eval_args(group)
group.add_argument("--render-beam", default=5, type=int, metavar="N",
help="beam size for parallel rendering")
group.add_argument("--render-resolution", default="512x512", type=str, metavar="N", help='if provide two numbers, means H x W')
group.add_argument("--render-angular-speed", default=1, type=float, metavar="D",
help="angular speed when rendering around the object")
group.add_argument("--render-num-frames", default=500, type=int, metavar="N")
group.add_argument("--render-path-style", default="circle", choices=["circle", "zoomin_circle", "zoomin_line"], type=str)
group.add_argument("--render-path-args", default="{'radius': 2.5, 'h': 0.0}",
help="specialized arguments for rendering paths")
group.add_argument("--render-output", default=None, type=str)
group.add_argument("--render-at-vector", default="(0,0,0)", type=str)
group.add_argument("--render-up-vector", default="(0,0,-1)", type=str)
group.add_argument("--render-output-types", nargs="+", type=str, default=["color"],
choices=["target", "color", "depth", "normal", "voxel", "predn", "point", "featn2", "vcolors"])
group.add_argument("--render-raymarching-steps", default=None, type=int)
group.add_argument("--render-save-fps", default=24, type=int)
group.add_argument("--render-combine-output", action='store_true',
help="if set, concat the images into one file.")
group.add_argument("--render-camera-poses", default=None, type=str,
help="text file saved for the testing trajectories")
group.add_argument("--render-camera-intrinsics", default=None, type=str)
group.add_argument("--render-views", type=str, default=None,
help="views sampled for rendering, you can set specific view id, or a range") | 2,595 | 50.92 | 131 | py |
NSVF | NSVF-main/fairnr/modules/renderer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairnr.modules.module_utils import FCLayer
from fairnr.data.geometry import ray
MAX_DEPTH = 10000.0
RENDERER_REGISTRY = {}
def register_renderer(name):
def register_renderer_cls(cls):
if name in RENDERER_REGISTRY:
raise ValueError('Cannot register duplicate module ({})'.format(name))
RENDERER_REGISTRY[name] = cls
return cls
return register_renderer_cls
def get_renderer(name):
if name not in RENDERER_REGISTRY:
raise ValueError('Cannot find module {}'.format(name))
return RENDERER_REGISTRY[name]
@register_renderer('abstract_renderer')
class Renderer(nn.Module):
"""
Abstract class for ray marching
"""
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, **kwargs):
raise NotImplementedError
@staticmethod
def add_args(parser):
pass
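# Illustrative sketch (hypothetical renderer, not part of fairnr): concrete
# renderers subclass Renderer and are registered by name with the decorator
# above, e.g. `@register_renderer('my_renderer')`, after which
# get_renderer('my_renderer') returns the class.
class _DemoRenderer(Renderer):
    def forward(self, **kwargs):
        # A no-op placeholder; real renderers implement ray marching here.
        return kwargs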
@register_renderer('volume_rendering')
class VolumeRenderer(Renderer):
def __init__(self, args):
super().__init__(args)
self.chunk_size = 1024 * getattr(args, "chunk_size", 64)
self.valid_chunk_size = 1024 * getattr(args, "valid_chunk_size", self.chunk_size // 1024)
self.discrete_reg = getattr(args, "discrete_regularization", False)
self.raymarching_tolerance = getattr(args, "raymarching_tolerance", 0.0)
self.trace_normal = getattr(args, "trace_normal", False)
@staticmethod
def add_args(parser):
# ray-marching parameters
parser.add_argument('--discrete-regularization', action='store_true',
                            help='if set, a zero mean unit variance gaussian will be added to encourage discreteness')
# additional arguments
parser.add_argument('--chunk-size', type=int, metavar='D',
                            help='process rays through the network in chunks of this size (x1024); trades time for memory')
parser.add_argument('--valid-chunk-size', type=int, metavar='D',
help='chunk size used when no training. In default the same as chunk-size.')
parser.add_argument('--raymarching-tolerance', type=float, default=0)
parser.add_argument('--trace-normal', action='store_true')
def forward_once(
self, input_fn, field_fn, ray_start, ray_dir, samples, encoder_states,
early_stop=None, output_types=['sigma', 'texture']
):
"""
        chunks: set > 1 if running out of memory; this saves memory at the cost of time.
"""
sampled_depth = samples['sampled_point_depth']
sampled_idx = samples['sampled_point_voxel_idx'].long()
# only compute when the ray hits
sample_mask = sampled_idx.ne(-1)
if early_stop is not None:
sample_mask = sample_mask & (~early_stop.unsqueeze(-1))
if sample_mask.sum() == 0: # miss everything skip
return None, 0
sampled_xyz = ray(ray_start.unsqueeze(1), ray_dir.unsqueeze(1), sampled_depth.unsqueeze(2))
sampled_dir = ray_dir.unsqueeze(1).expand(*sampled_depth.size(), ray_dir.size()[-1])
samples['sampled_point_xyz'] = sampled_xyz
samples['sampled_point_ray_direction'] = sampled_dir
# apply mask
samples = {name: s[sample_mask] for name, s in samples.items()}
# get encoder features as inputs
field_inputs = input_fn(samples, encoder_states)
# forward implicit fields
field_outputs = field_fn(field_inputs, outputs=output_types)
outputs = {'sample_mask': sample_mask}
def masked_scatter(mask, x):
B, K = mask.size()
if x.dim() == 1:
return x.new_zeros(B, K).masked_scatter(mask, x)
return x.new_zeros(B, K, x.size(-1)).masked_scatter(
mask.unsqueeze(-1).expand(B, K, x.size(-1)), x)
# post processing
if 'sigma' in field_outputs:
sigma, sampled_dists= field_outputs['sigma'], field_inputs['dists']
noise = 0 if not self.discrete_reg and (not self.training) else torch.zeros_like(sigma).normal_()
free_energy = torch.relu(noise + sigma) * sampled_dists
free_energy = free_energy * 7.0 # ? [debug]
# (optional) free_energy = (F.elu(sigma - 3, alpha=1) + 1) * dists
# (optional) free_energy = torch.abs(sigma) * sampled_dists ## ??
outputs['free_energy'] = masked_scatter(sample_mask, free_energy)
if 'sdf' in field_outputs:
outputs['sdf'] = masked_scatter(sample_mask, field_outputs['sdf'])
if 'texture' in field_outputs:
outputs['texture'] = masked_scatter(sample_mask, field_outputs['texture'])
if 'normal' in field_outputs:
outputs['normal'] = masked_scatter(sample_mask, field_outputs['normal'])
if 'feat_n2' in field_outputs:
outputs['feat_n2'] = masked_scatter(sample_mask, field_outputs['feat_n2'])
return outputs, sample_mask.sum()
def forward_chunk(
self, input_fn, field_fn, ray_start, ray_dir, samples, encoder_states,
gt_depths=None, output_types=['sigma', 'texture'], global_weights=None,
):
if self.trace_normal:
output_types += ['normal']
sampled_depth = samples['sampled_point_depth']
sampled_idx = samples['sampled_point_voxel_idx'].long()
original_depth = samples.get('original_point_depth', None)
tolerance = self.raymarching_tolerance
chunk_size = self.chunk_size if self.training else self.valid_chunk_size
early_stop = None
if tolerance > 0:
tolerance = -math.log(tolerance)
hits = sampled_idx.ne(-1).long()
outputs = defaultdict(lambda: [])
size_so_far, start_step = 0, 0
accumulated_free_energy = 0
accumulated_evaluations = 0
for i in range(hits.size(1) + 1):
if ((i == hits.size(1)) or (size_so_far + hits[:, i].sum() > chunk_size)) and (i > start_step):
_outputs, _evals = self.forward_once(
input_fn, field_fn,
ray_start, ray_dir,
{name: s[:, start_step: i]
for name, s in samples.items()},
encoder_states,
early_stop=early_stop,
output_types=output_types)
if _outputs is not None:
accumulated_evaluations += _evals
if 'free_energy' in _outputs:
accumulated_free_energy += _outputs['free_energy'].sum(1)
if tolerance > 0:
early_stop = accumulated_free_energy > tolerance
hits[early_stop] *= 0
for key in _outputs:
outputs[key] += [_outputs[key]]
else:
for key in outputs:
outputs[key] += [outputs[key][-1].new_zeros(
outputs[key][-1].size(0),
sampled_depth[:, start_step: i].size(1),
*outputs[key][-1].size()[2:]
)]
start_step, size_so_far = i, 0
if (i < hits.size(1)):
size_so_far += hits[:, i].sum()
outputs = {key: torch.cat(outputs[key], 1) for key in outputs}
results = {}
if 'free_energy' in outputs:
free_energy = outputs['free_energy']
shifted_free_energy = torch.cat([free_energy.new_zeros(sampled_depth.size(0), 1), free_energy[:, :-1]], dim=-1) # shift one step
a = 1 - torch.exp(-free_energy.float()) # probability of it is not empty here
b = torch.exp(-torch.cumsum(shifted_free_energy.float(), dim=-1)) # probability of everything is empty up to now
probs = (a * b).type_as(free_energy) # probability of the ray hits something here
else:
probs = outputs['sample_mask'].type_as(sampled_depth) / sampled_depth.size(-1) # assuming a uniform distribution
if global_weights is not None:
probs = probs * global_weights
depth = (sampled_depth * probs).sum(-1)
missed = 1 - probs.sum(-1)
results.update({
'probs': probs, 'depths': depth,
'max_depths': sampled_depth.masked_fill(hits.eq(0), -1).max(1).values,
'min_depths': sampled_depth.min(1).values,
'missed': missed, 'ae': accumulated_evaluations
})
if original_depth is not None:
results['z'] = (original_depth * probs).sum(-1)
if 'texture' in outputs:
results['colors'] = (outputs['texture'] * probs.unsqueeze(-1)).sum(-2)
if 'normal' in outputs:
results['normal'] = (outputs['normal'] * probs.unsqueeze(-1)).sum(-2)
if not self.trace_normal:
results['eikonal-term'] = (outputs['normal'].norm(p=2, dim=-1) - 1) ** 2
else:
results['eikonal-term'] = torch.log((outputs['normal'] ** 2).sum(-1) + 1e-6)
results['eikonal-term'] = results['eikonal-term'][sampled_idx.ne(-1)]
if 'feat_n2' in outputs:
results['feat_n2'] = (outputs['feat_n2'] * probs).sum(-1)
results['regz-term'] = outputs['feat_n2'][sampled_idx.ne(-1)]
return results
def forward(self, input_fn, field_fn, ray_start, ray_dir, samples, *args, **kwargs):
chunk_size = self.chunk_size if self.training else self.valid_chunk_size
if ray_start.size(0) <= chunk_size:
results = self.forward_chunk(input_fn, field_fn, ray_start, ray_dir, samples, *args, **kwargs)
else:
            # the number of rays is larger than the maximum forward passes. pre-chunking..
results = [
self.forward_chunk(input_fn, field_fn,
ray_start[i: i+chunk_size], ray_dir[i: i+chunk_size],
{name: s[i: i+chunk_size] for name, s in samples.items()}, *args, **kwargs)
for i in range(0, ray_start.size(0), chunk_size)
]
results = {name: torch.cat([r[name] for r in results], 0)
if results[0][name].dim() > 0 else sum([r[name] for r in results])
for name in results[0]}
if getattr(input_fn, "track_max_probs", False) and (not self.training):
input_fn.track_voxel_probs(samples['sampled_point_voxel_idx'].long(), results['probs'])
return results
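# Illustrative sketch (plain tensors, not the full renderer): the compositing
# weights computed in forward_chunk follow the standard volume-rendering
# quadrature w_i = (1 - exp(-sigma_i * delta_i)) * exp(-sum_{j<i} sigma_j * delta_j).
def _demo_compositing_weights(free_energy):
    # free_energy: (num_rays, num_samples) holding sigma_i * delta_i per sample.
    shifted = torch.cat(
        [free_energy.new_zeros(free_energy.size(0), 1), free_energy[:, :-1]], dim=-1)
    a = 1 - torch.exp(-free_energy)                # probability of stopping here
    b = torch.exp(-torch.cumsum(shifted, dim=-1))  # probability of getting this far
    return a * b  # weights; (weights.unsqueeze(-1) * colors).sum(-2) gives a pixel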
@register_renderer('surface_volume_rendering')
class SurfaceVolumeRenderer(VolumeRenderer):
def forward_chunk(
self, input_fn, field_fn, ray_start, ray_dir, samples, encoder_states,
gt_depths=None, output_types=['sigma', 'texture'], global_weights=None,
):
results = super().forward_chunk(
input_fn, field_fn, ray_start, ray_dir, samples, encoder_states,
output_types=['sigma', 'normal'])
# render at the "intersection"
n_probs = results['probs'].clamp(min=1e-6).masked_fill(samples['sampled_point_voxel_idx'].eq(-1), 0)
n_depth = (samples['sampled_point_depth'] * n_probs).sum(-1, keepdim=True) / n_probs.sum(-1, keepdim=True).clamp(min=1e-6)
n_bound = samples['sampled_point_depth'] + samples['sampled_point_distance'] / 2
n_vidxs = ((n_depth - n_bound) >= 0).sum(-1, keepdim=True)
n_vidxs = samples['sampled_point_voxel_idx'].gather(1, n_vidxs)
new_samples = {
'sampled_point_depth': n_depth,
'sampled_point_distance': torch.ones_like(n_depth) * 1e-3, # dummy distance. not useful.
'sampled_point_voxel_idx': n_vidxs,
}
new_results, _ = self.forward_once(input_fn, field_fn, ray_start, ray_dir, new_samples, encoder_states)
results['colors'] = new_results['texture'].squeeze(1) * (1 - results['missed'][:, None])
results['normal'] = new_results['normal'].squeeze(1)
results['eikonal-term'] = torch.cat([results['eikonal-term'], (results['normal'].norm(p=2, dim=-1) - 1) ** 2], 0)
return results | 12,718 | 44.102837 | 141 | py |
NSVF | NSVF-main/fairnr/modules/reader.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import random, os, glob
from fairnr.data.geometry import get_ray_direction, r6d2mat
torch.autograd.set_detect_anomaly(True)
TINY = 1e-9
READER_REGISTRY = {}
def register_reader(name):
def register_reader_cls(cls):
if name in READER_REGISTRY:
raise ValueError('Cannot register duplicate module ({})'.format(name))
READER_REGISTRY[name] = cls
return cls
return register_reader_cls
def get_reader(name):
if name not in READER_REGISTRY:
raise ValueError('Cannot find module {}'.format(name))
return READER_REGISTRY[name]
@register_reader('abstract_reader')
class Reader(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, **kwargs):
raise NotImplementedError
@staticmethod
def add_args(parser):
pass
@register_reader('image_reader')
class ImageReader(Reader):
"""
basic image reader
"""
def __init__(self, args):
super().__init__(args)
self.num_pixels = args.pixel_per_view
self.no_sampling = getattr(args, "no_sampling_at_reader", False)
self.deltas = None
self.all_data = self.find_data()
if getattr(args, "trainable_extrinsics", False):
self.all_data_idx = {data_img: (s, v)
for s, data in enumerate(self.all_data)
for v, data_img in enumerate(data)}
self.deltas = nn.ParameterList([
nn.Parameter(torch.tensor(
[[1., 0., 0., 0., 1., 0., 0., 0., 0.]]).repeat(len(data), 1))
for data in self.all_data])
def find_data(self):
paths = self.args.data
if os.path.isdir(paths):
self.paths = [paths]
else:
self.paths = [line.strip() for line in open(paths)]
return [sorted(glob.glob("{}/rgb/*".format(p))) for p in self.paths]
@staticmethod
def add_args(parser):
parser.add_argument('--pixel-per-view', type=float, metavar='N',
help='number of pixels sampled for each view')
parser.add_argument("--sampling-on-mask", nargs='?', const=0.9, type=float,
help="this value determined the probability of sampling rays on masks")
parser.add_argument("--sampling-at-center", type=float,
help="only useful for training where we restrict sampling at center of the image")
parser.add_argument("--sampling-on-bbox", action='store_true',
help="sampling points to close to the mask")
parser.add_argument("--sampling-patch-size", type=int,
help="sample pixels based on patches instead of independent pixels")
parser.add_argument("--sampling-skipping-size", type=int,
help="sample pixels if we have skipped pixels")
parser.add_argument("--no-sampling-at-reader", action='store_true',
help="do not perform sampling.")
parser.add_argument("--trainable-extrinsics", action='store_true',
help="if set, we assume extrinsics are trainable. We use 6D representations for rotation")
def forward(self, uv, intrinsics, extrinsics, size, path=None, **kwargs):
S, V = uv.size()[:2]
if (not self.training) or self.no_sampling:
uv = uv.reshape(S, V, 2, -1, 1, 1)
flatten_uv = uv.reshape(S, V, 2, -1)
else:
uv, _ = self.sample_pixels(uv, size, **kwargs)
flatten_uv = uv.reshape(S, V, 2, -1)
# go over all shapes
ray_start, ray_dir = [[] for _ in range(S)], [[] for _ in range(S)]
for s in range(S):
for v in range(V):
ixt = intrinsics[s] if intrinsics.dim() == 3 else intrinsics[s, v]
ext = extrinsics[s, v]
translation, rotation = ext[:3, 3], ext[:3, :3]
if (self.deltas is not None) and (path is not None):
shape_id, view_id = self.all_data_idx[path[s][v]]
delta = self.deltas[shape_id][view_id]
d_t, d_r = delta[6:], r6d2mat(delta[None, :6]).squeeze(0)
rotation = rotation @ d_r
translation = translation + d_t
ext = torch.cat([torch.cat([rotation, translation[:, None]], 1), ext[3:]], 0)
ray_start[s] += [translation]
ray_dir[s] += [get_ray_direction(translation, flatten_uv[s, v], ixt, ext, 1)]
ray_start = torch.stack([torch.stack(r) for r in ray_start])
ray_dir = torch.stack([torch.stack(r) for r in ray_dir])
return ray_start.unsqueeze(-2), ray_dir.transpose(2, 3), uv
@torch.no_grad()
def sample_pixels(self, uv, size, alpha=None, mask=None, **kwargs):
H, W = int(size[0,0,0]), int(size[0,0,1])
S, V = uv.size()[:2]
if mask is None:
if alpha is not None:
mask = (alpha > 0)
else:
mask = uv.new_ones(S, V, uv.size(-1)).bool()
mask = mask.float().reshape(S, V, H, W)
if self.args.sampling_at_center < 1.0:
r = (1 - self.args.sampling_at_center) / 2.0
mask0 = mask.new_zeros(S, V, H, W)
mask0[:, :, int(H * r): H - int(H * r), int(W * r): W - int(W * r)] = 1
mask = mask * mask0
if self.args.sampling_on_bbox:
x_has_points = mask.sum(2, keepdim=True) > 0
y_has_points = mask.sum(3, keepdim=True) > 0
mask = (x_has_points & y_has_points).float()
probs = mask / (mask.sum() + 1e-8)
if self.args.sampling_on_mask > 0.0:
probs = self.args.sampling_on_mask * probs + (1 - self.args.sampling_on_mask) * 1.0 / (H * W)
num_pixels = int(self.args.pixel_per_view)
patch_size, skip_size = self.args.sampling_patch_size, self.args.sampling_skipping_size
C = patch_size * skip_size
if C > 1:
probs = probs.reshape(S, V, H // C, C, W // C, C).sum(3).sum(-1)
num_pixels = num_pixels // patch_size // patch_size
flatten_probs = probs.reshape(S, V, -1)
        sampled_index = sampling_without_replacement(torch.log(flatten_probs + TINY), num_pixels)
sampled_masks = torch.zeros_like(flatten_probs).scatter_(-1, sampled_index, 1).reshape(S, V, H // C, W // C)
if C > 1:
sampled_masks = sampled_masks[:, :, :, None, :, None].repeat(
1, 1, 1, patch_size, 1, patch_size).reshape(S, V, H // skip_size, W // skip_size)
if skip_size > 1:
full_datamask = sampled_masks.new_zeros(S, V, skip_size * skip_size, H // skip_size, W // skip_size)
full_index = torch.randint(skip_size*skip_size, (S, V))
for i in range(S):
for j in range(V):
full_datamask[i, j, full_index[i, j]] = sampled_masks[i, j]
sampled_masks = full_datamask.reshape(
S, V, skip_size, skip_size, H // skip_size, W // skip_size).permute(0, 1, 4, 2, 5, 3).reshape(S, V, H, W)
X, Y = uv[:,:,0].reshape(S, V, H, W), uv[:,:,1].reshape(S, V, H, W)
X = X[sampled_masks>0].reshape(S, V, 1, -1, patch_size, patch_size)
Y = Y[sampled_masks>0].reshape(S, V, 1, -1, patch_size, patch_size)
return torch.cat([X, Y], 2), sampled_masks
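# --- Illustrative sketch (not part of the original NSVF code) ---------------
# `sample_pixels` above converts a per-pixel probability map into a fixed
# number of sampled pixel indices via the Gumbel-top-k trick implemented in
# `sampling_without_replacement` at the end of this file: perturbing the
# log-probabilities with i.i.d. Gumbel noise and taking the top-k indices is
# equivalent to sampling k pixels without replacement in proportion to their
# probabilities. A minimal, self-contained demonstration:
def _example_gumbel_topk_sampling(num_pixels=4, num_candidates=16):
    probs = torch.rand(1, 1, num_candidates)
    probs = probs / probs.sum(-1, keepdim=True)
    eps = 1e-9  # plays the role of the module-level TINY constant used above
    gumbel = -torch.log(-torch.log(torch.rand_like(probs) + eps) + eps)
    sampled_index = (torch.log(probs + eps) + gumbel).topk(num_pixels, dim=-1)[1]
    return sampled_index  # shape (1, 1, num_pixels), no repeated indices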
def sampling_without_replacement(logp, k):
def gumbel_like(u):
return -torch.log(-torch.log(torch.rand_like(u) + TINY) + TINY)
scores = logp + gumbel_like(logp)
return scores.topk(k, dim=-1)[1] | 7,959 | 42.736264 | 125 | py |
NSVF | NSVF-main/fairnr/modules/field.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import grad
from collections import OrderedDict
from fairnr.modules.implicit import (
ImplicitField, SignedDistanceField,
TextureField, HyperImplicitField, BackgroundField
)
from fairnr.modules.module_utils import NeRFPosEmbLinear
FIELD_REGISTRY = {}
def register_field(name):
def register_field_cls(cls):
if name in FIELD_REGISTRY:
raise ValueError('Cannot register duplicate module ({})'.format(name))
FIELD_REGISTRY[name] = cls
return cls
return register_field_cls
def get_field(name):
if name not in FIELD_REGISTRY:
raise ValueError('Cannot find module {}'.format(name))
return FIELD_REGISTRY[name]
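# Illustrative usage of the registry above (hypothetical snippet; assumes an
# `args` namespace compatible with the selected field's `add_args`):
#
#     field_cls = get_field('radiance_field')   # look up a registered class by name
#     field = field_cls(args)                   # instantiate it from parsed args
#
# `register_field` records every decorated class in FIELD_REGISTRY so that model
# configurations can choose an implicit-field implementation by its string name.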
@register_field('abstract_field')
class Field(nn.Module):
"""
Abstract class for implicit functions
"""
def __init__(self, args):
super().__init__()
self.args = args
self.updates = -1
def forward(self, **kwargs):
raise NotImplementedError
@staticmethod
def add_args(parser):
pass
def set_num_updates(self, num_updates):
self.updates = num_updates
@register_field('radiance_field')
class RaidanceField(Field):
def __init__(self, args):
super().__init__(args)
# additional arguments
self.chunk_size = getattr(args, "chunk_size", 256) * 256
self.deterministic_step = getattr(args, "deterministic_step", False)
# background field
self.min_color = getattr(args, "min_color", -1)
self.trans_bg = getattr(args, "transparent_background", "1.0,1.0,1.0")
self.sgbg = getattr(args, "background_stop_gradient", False)
self.bg_color = BackgroundField(bg_color=self.trans_bg, min_color=self.min_color, stop_grad=self.sgbg)
# MLP specs
self.nerf_style = getattr(args, "nerf_style_mlp", False) # NeRF style MLPs
self.with_ln = not getattr(args, "no_layernorm_mlp", False)
self.skips = getattr(args, "feature_field_skip_connect", None)
self.skips = [self.skips] if self.skips is not None else None
# input specs
self.den_filters, self.den_ori_dims, self.den_input_dims = self.parse_inputs(args.inputs_to_density)
self.tex_filters, self.tex_ori_dims, self.tex_input_dims = self.parse_inputs(args.inputs_to_texture)
self.den_filters, self.tex_filters = nn.ModuleDict(self.den_filters), nn.ModuleDict(self.tex_filters)
# build networks
self.build_feature_field(args)
self.build_density_predictor(args)
self.build_texture_renderer(args)
if getattr(args, "zero_z_steps", 0) > 0:
self.register_buffer("zero_z", torch.scalar_tensor(1)) # it will be saved to checkpoint
else:
self.zero_z = 0
def set_num_updates(self, updates):
self.updates = updates
if getattr(self.args, "zero_z_steps", 0) <= self.updates:
self.zero_z = self.zero_z * 0
def build_feature_field(self, args):
den_feat_dim = self.tex_input_dims[0]
den_input_dim, tex_input_dim = sum(self.den_input_dims), sum(self.tex_input_dims)
if not getattr(args, "hypernetwork", False):
self.feature_field = ImplicitField(
den_input_dim, den_feat_dim, args.feature_embed_dim,
args.feature_layers + 2 if not self.nerf_style else 8, # +2 is to adapt to old code
with_ln=self.with_ln if not self.nerf_style else False,
skips=self.skips if not self.nerf_style else [4],
spec_init=True if not self.nerf_style else False)
else:
assert (not self.nerf_style), "Hypernetwork does not support NeRF style MLPs yet."
den_contxt_dim = self.den_input_dims[-1]
self.feature_field = HyperImplicitField(
den_contxt_dim, den_input_dim - den_contxt_dim,
den_feat_dim, args.feature_embed_dim, args.feature_layers + 2) # +2 is to adapt to old code
def build_density_predictor(self, args):
den_feat_dim = self.tex_input_dims[0]
self.predictor = SignedDistanceField(
den_feat_dim, args.density_embed_dim, recurrent=False, num_layers=1,
with_ln=self.with_ln if not self.nerf_style else False,
spec_init=True if not self.nerf_style else False)
def build_texture_renderer(self, args):
tex_input_dim = sum(self.tex_input_dims)
self.renderer = TextureField(
tex_input_dim, args.texture_embed_dim,
args.texture_layers + 2 if not self.nerf_style else 2,
with_ln=self.with_ln if not self.nerf_style else False,
spec_init=True if not self.nerf_style else False)
def parse_inputs(self, arguments):
def fillup(p):
assert len(p) > 0
default = 'b' if (p[0] != 'ray') and (p[0] != 'normal') else 'a'
if len(p) == 1:
return [p[0], 0, 3, default]
elif len(p) == 2:
return [p[0], int(p[1]), 3, default]
elif len(p) == 3:
return [p[0], int(p[1]), int(p[2]), default]
return [p[0], int(p[1]), int(p[2]), p[3]]
filters, input_dims, output_dims = OrderedDict(), [], []
for p in arguments.split(','):
name, pos_dim, base_dim, pos_type = fillup([a.strip() for a in p.strip().split(':')])
if pos_dim > 0: # use positional embedding
func = NeRFPosEmbLinear(
base_dim, base_dim * pos_dim * 2,
angular=(pos_type == 'a'),
no_linear=True,
cat_input=(pos_type == 'b'))
odim = func.out_dim + func.in_dim if func.cat_input else func.out_dim
else:
func = nn.Identity()
odim = base_dim
input_dims += [base_dim]
output_dims += [odim]
filters[name] = func
return filters, input_dims, output_dims
@staticmethod
def add_args(parser):
parser.add_argument('--inputs-to-density', type=str,
help="""
Types of inputs to predict the density.
Choices of types are emb or pos.
                                use the first : to assign the sinusoidal frequency.
                                use the second : to assign the input dimension (default 3).
                                use the third : to set the type -> basic, angular or gaussian.
                                The assigned dimensions must match the actual input sizes.
e.g. --inputs-to-density emb:6:32,pos:4
""")
parser.add_argument('--inputs-to-texture', type=str,
help="""
Types of inputs to predict the texture.
Choices of types are feat, emb, ray, pos or normal.
""")
parser.add_argument('--nerf-style-mlp', action='store_true',
help='use NeRF style MLPs for implicit function (with skip-connection).')
parser.add_argument('--no-layernorm-mlp', action='store_true',
help='do not use layernorm in MLPs.')
parser.add_argument('--feature-field-skip-connect', type=int,
help='add skip-connection in the feature field.')
parser.add_argument('--feature-embed-dim', type=int, metavar='N',
help='field hidden dimension for FFN')
parser.add_argument('--density-embed-dim', type=int, metavar='N',
                            help='hidden dimension of density prediction')
parser.add_argument('--texture-embed-dim', type=int, metavar='N',
help='hidden dimension of texture prediction')
parser.add_argument('--feature-layers', type=int, metavar='N',
help='number of FC layers used to encode')
parser.add_argument('--texture-layers', type=int, metavar='N',
help='number of FC layers used to predict colors')
parser.add_argument('--no-normalize-normal', action='store_true',
help='if set, do not normalize the gradient of density as the normal direction.')
parser.add_argument('--zero-z-steps', type=int, default=0)
# specific parameters (hypernetwork does not work right now)
parser.add_argument('--hypernetwork', action='store_true',
help='use hypernetwork to model feature')
parser.add_argument('--hyper-feature-embed-dim', type=int, metavar='N',
help='feature dimension used to predict the hypernetwork. consistent with context embedding')
# backgound parameters
parser.add_argument('--background-depth', type=float,
help='the depth of background. used for depth visualization')
parser.add_argument('--background-stop-gradient', action='store_true',
help='do not optimize the background color')
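    # Worked example (illustrative) of the spec parsed by `parse_inputs`, assuming
    # NeRFPosEmbLinear exposes out_dim == in_dim * n_freq * 2 as constructed above:
    #     --inputs-to-density "emb:6:32,pos:4"
    #       emb:6:32 -> 32-dim voxel embedding, 6 frequencies, default type 'b'
    #                   (concatenate the raw input): 32 * 6 * 2 + 32 = 416 dims
    #       pos:4    -> 3-dim position (default), 4 frequencies, type 'b':
    #                   3 * 4 * 2 + 3 = 27 dims
    #     giving 416 + 27 = 443 input channels for the density feature MLP.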
@torch.enable_grad() # tracking the gradient in case we need to have normal at testing time.
def forward(self, inputs, outputs=['sigma', 'texture']):
filtered_inputs, context = [], None
if inputs.get('feat', None) is None:
for i, name in enumerate(self.den_filters):
d_in, func = self.den_ori_dims[i], self.den_filters[name]
assert (name in inputs), "the encoder must contain target inputs"
assert inputs[name].size(-1) == d_in, "{} dimension must match {} v.s. {}".format(
name, inputs[name].size(-1), d_in)
if name == 'context':
assert (i == (len(self.den_filters) - 1)), "we force context as the last input"
assert inputs[name].size(0) == 1, "context is object level"
context = func(inputs[name])
else:
filtered_inputs += [func(inputs[name])]
filtered_inputs = torch.cat(filtered_inputs, -1)
if context is not None:
if getattr(self.args, "hypernetwork", False):
filtered_inputs = (filtered_inputs, context)
else:
filtered_inputs = (torch.cat([filtered_inputs, context.expand(filtered_inputs.size(0), context.size(1))], -1),)
else:
filtered_inputs = (filtered_inputs, )
inputs['feat'] = self.feature_field(*filtered_inputs)
if 'sigma' in outputs:
assert 'feat' in inputs, "feature must be pre-computed"
inputs['sigma'] = self.predictor(inputs['feat'])[0]
if ('normal' not in inputs) and (
(('texture' in outputs) and ("normal" in self.tex_filters))
or ("normal" in outputs)):
assert 'sigma' in inputs, "sigma must be pre-computed"
assert 'pos' in inputs, "position is used to compute sigma"
grad_pos, = grad(
outputs=inputs['sigma'], inputs=inputs['pos'],
grad_outputs=torch.ones_like(inputs['sigma'], requires_grad=False),
retain_graph=True, create_graph=True)
if not getattr(self.args, "no_normalize_normal", False):
inputs['normal'] = F.normalize(-grad_pos, p=2, dim=1) # BUG: gradient direction reversed.
else:
inputs['normal'] = -grad_pos # no normalization. magnitude also has information?
if 'texture' in outputs:
filtered_inputs = []
if self.zero_z == 1:
inputs['feat'] = inputs['feat'] * 0.0 # zero-out latent feature
inputs['feat_n2'] = (inputs['feat'] ** 2).sum(-1)
for i, name in enumerate(self.tex_filters):
d_in, func = self.tex_ori_dims[i], self.tex_filters[name]
assert (name in inputs), "the encoder must contain target inputs"
filtered_inputs += [func(inputs[name])] if name != 'sigma' else [func(inputs[name].unsqueeze(-1))]
filtered_inputs = torch.cat(filtered_inputs, -1)
inputs['texture'] = self.renderer(filtered_inputs)
if self.min_color == 0:
inputs['texture'] = torch.sigmoid(inputs['texture'])
return inputs
@register_field('sdf_radiance_field')
class SDFRaidanceField(RaidanceField):
@staticmethod
def add_args(parser):
parser.add_argument('--reg-z', action='store_true', help='regularize latent feature')
parser.add_argument('--dropout-z', type=float, default=0.0)
parser.add_argument('--project-to-surface', action='store_true',
help='project any point to the surface to obtain color.')
RaidanceField.add_args(parser)
def build_feature_field(self, args):
den_feat_dim = self.tex_input_dims[0]
den_input_dim, tex_input_dim = sum(self.den_input_dims), sum(self.tex_input_dims)
assert not getattr(args, "hypernetwork", False), "does not support hypernetwork for now"
assert (den_input_dim == 3) or (
self.den_filters['pos'].cat_input and len(self.den_filters) == 1), "cat pos in the end"
num_layers = args.feature_layers + 2 if not self.nerf_style else 8
skips = self.skips if not self.nerf_style else [4]
self.feature_field = ImplicitField(
den_input_dim, den_feat_dim + 1, args.feature_embed_dim, # +1 is for SDF values
num_layers, with_ln=False, skips=skips, outmost_linear=True, spec_init=False)
if getattr(args, "dropout_z", 0.0) > 0.0:
self.dropout_z = nn.Dropout(p=self.args.dropout_z)
else:
self.dropout_z = None
"""
Geometric initialization from https://arxiv.org/pdf/1911.10414.pdf
This enforce a model to approximate a SDF function:
f(x; \theta) \approx |x| - 1
"""
bias = 1.0
for l in range(num_layers):
lin = self.feature_field.net[l]
if l < num_layers - 1:
lin = lin.net[0]
if l == num_layers - 1: # last layer
torch.nn.init.normal_(lin.weight, mean=math.sqrt(math.pi) / math.sqrt(lin.weight.size(1)), std=0.0001)
torch.nn.init.constant_(lin.bias, -bias)
elif l == 0:
torch.nn.init.constant_(lin.bias, 0.0)
if den_input_dim > 3:
torch.nn.init.constant_(lin.weight[:, :-3], 0.0)
torch.nn.init.normal_(lin.weight[:, -3:], 0.0, math.sqrt(2) / math.sqrt(lin.weight.size(0)))
elif (l - 1) in skips:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.normal_(lin.weight, 0.0, math.sqrt(2) / math.sqrt(lin.weight.size(0)))
torch.nn.init.constant_(lin.weight[:, :den_input_dim-3], 0.0)
else:
torch.nn.init.constant_(lin.bias, 0.0)
torch.nn.init.normal_(lin.weight, 0.0, math.sqrt(2) / math.sqrt(lin.weight.size(0)))
        # force the initial latent features (all outputs except the SDF channel) to 0
self.feature_field.net[7].weight.data[1:] = self.feature_field.net[7].weight.data[1:] * 0.0
self.feature_field.net[7].bias.data[1:] = self.feature_field.net[7].bias.data[1:] * 0.0
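    # Summary of the initialization above (illustrative): the last layer is drawn
    # around mean sqrt(pi)/sqrt(fan_in) with bias -1, and the remaining layers use
    # the scaled normal initialization from the paper cited in the docstring
    # (arXiv:1911.10414), so at initialization the SDF output channel roughly
    # satisfies f(x) ~ ||x|| - 1, i.e. the signed distance to a unit sphere, which
    # gives ray marching a sensible surface before any training.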
def build_density_predictor(self, args):
class Sdf2Densityv1(nn.Module):
def __init__(self):
super().__init__()
self.alpha = nn.Parameter(torch.scalar_tensor(10.0), requires_grad=True)
self.sigma = nn.Parameter(torch.scalar_tensor(50.0), requires_grad=True)
def forward(self, x):
return self.sigma * torch.tanh(-torch.abs(self.alpha) * x[:, 0]), None
class Sdf2Densityv2(nn.Module):
def __init__(self):
super().__init__()
self.sigma = nn.Parameter(torch.scalar_tensor(50.0), requires_grad=True)
# self.alpha = nn.Parameter(torch.scalar_tensor(0.05), requires_grad=True)
def forward(self, x):
return -self.sigma * x[:, 0], None
class Sdf2Densityv3(nn.Module):
def __init__(self):
super().__init__()
self.sigma = nn.Parameter(torch.scalar_tensor(100.0), requires_grad=True)
# self.alpha = nn.Parameter(torch.scalar_tensor(0.05), requires_grad=True)
def forward(self, x):
return F.elu(-self.sigma * x[:, 0]), None
self.predictor = Sdf2Densityv1()
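    # The three SDF-to-density mappings defined above are, writing s for the
    # predicted signed distance:
    #     v1: sigma_out = sigma * tanh(-|alpha| * s)
    #     v2: sigma_out = -sigma * s
    #     v3: sigma_out = elu(-sigma * s)
    # Only Sdf2Densityv1 is instantiated here; the other two are kept as alternatives.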
@torch.enable_grad()
def forward(self, inputs, outputs=['sigma', 'texture']):
if ('sigma' in outputs):
inputs = super().forward(inputs, ['sigma'])
inputs['sdf'] = inputs['feat'][:, 0]
inputs['feat'] = inputs['feat'][:, 1:] # remove sdf from feature
if (getattr(self.args, "zero_z_steps", 0) > self.updates) and self.training:
inputs['feat'] = inputs['feat'] * 0.0 # zero-out latent feature
if self.dropout_z is not None:
inputs['feat'] = self.dropout_z(inputs['feat']) # apply dropout on the feature.
if ('texture' in outputs) or ('normal' in outputs):
# compute gradient for sdf, no need to normalize them
inputs['normal'] = grad(
outputs=inputs['sdf'], inputs=inputs['pos'],
grad_outputs=torch.ones_like(inputs['sdf'], requires_grad=False),
retain_graph=True, create_graph=True)[0]
# compute color for points projected on the surface
if getattr(self.args, "project_to_surface", False):
inputs['pos'] = inputs['pos'] - inputs['sdf'][:, None] * inputs['normal']
inputs['feat'] = None
inputs = super().forward(inputs, outputs=['feat'])
inputs['feat'] = inputs['feat'][:, 1:]
inputs['feat_n2'] = (inputs['feat'] ** 2).sum(-1)
if 'texture' in outputs:
inputs = super().forward(inputs, ['texture'])
return inputs
@register_field('disentangled_radiance_field')
class DisentangledRaidanceField(RaidanceField):
def __init__(self, args):
super().__init__(args)
# for now we fix the input types
assert [name for name in self.tex_filters][:4] == ['feat', 'pos', 'normal', 'ray']
lt_in_dim = self.tex_input_dims[2] + self.tex_input_dims[3]
lg_in_dim = self.tex_input_dims[0] + self.tex_input_dims[1]
if len(self.tex_filters) > 4:
lt_in_dim += sum(self.tex_input_dims[4:])
lg_in_dim += sum(self.tex_input_dims[4:])
# rebuild the renderer
self.D = getattr(args, "compressed_light_dim", 64) # D
self.renderer = nn.ModuleDict(
{
"light-transport": nn.Sequential(
ImplicitField(
in_dim=lt_in_dim,
out_dim=self.D * 3,
hidden_dim=args.texture_embed_dim,
num_layers=args.texture_layers,
outmost_linear=True
), nn.Sigmoid()), # f(v, n, w)
"lighting": nn.Sequential(
ImplicitField(
in_dim=lg_in_dim,
out_dim=self.D * 3,
hidden_dim=args.texture_embed_dim,
num_layers=args.texture_layers,
outmost_linear=True
), nn.ReLU()), # v(x, z, w)
}
)
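    # The renderer above factorizes the outgoing color into D compressed light
    # components: a view/normal-dependent "light-transport" term f(n, v[, h_brdf])
    # and a position/feature-dependent "lighting" term v(x, z[, h_l]); in forward()
    # below the texture is their per-component product averaged over D.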
@staticmethod
def add_args(parser):
RaidanceField.add_args(parser)
        parser.add_argument('--compressed-light-dim', type=int,
                            help='instead of sampling light directions physically, we use a compressed set of learned light components')
@torch.enable_grad() # tracking the gradient in case we need to have normal at testing time.
def forward(self, inputs, outputs=['sigma', 'texture']):
h_g, h_brdf, h_l = None, None, None
if inputs.get('context', None) is not None:
h_g, h_brdf, h_l = [inputs['context'][k:k+1] for k in range(3)]
inputs['context'] = h_g
inputs = super().forward(inputs, outputs=['sigma', 'normal'])
if 'texture' in outputs:
lt_inputs = [self.tex_filters['normal'](inputs['normal']), self.tex_filters['ray'](inputs['ray'])]
if h_brdf is not None:
lt_inputs += [self.tex_filters['context'](h_brdf).expand(lt_inputs[0].size(0), -1)]
li_inputs = [self.tex_filters['feat'](inputs['feat']), self.tex_filters['pos'](inputs['pos'])]
if h_l is not None:
li_inputs += [self.tex_filters['context'](h_l).expand(li_inputs[0].size(0), -1)]
lt = self.renderer['light-transport'](torch.cat(lt_inputs, -1)).reshape(-1, self.D, 3)
li = self.renderer['lighting'](torch.cat(li_inputs, -1)).reshape(-1, self.D, 3)
texture = (lt * li).mean(1)
if self.min_color == -1:
texture = 2 * texture - 1
inputs['texture'] = texture
return inputs | 21,863 | 45.322034 | 131 | py |
NSVF | NSVF-main/fairnr/modules/encoder.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import numpy as np
import math
import sys
import os
import math
import logging
logger = logging.getLogger(__name__)
from pathlib import Path
from plyfile import PlyData, PlyElement
from fairnr.data.data_utils import load_matrix
from fairnr.data.geometry import (
trilinear_interp, splitting_points, offset_points,
get_edge, build_easy_octree, discretize_points
)
from fairnr.clib import (
aabb_ray_intersect, triangle_ray_intersect, svo_ray_intersect,
uniform_ray_sampling, inverse_cdf_sampling
)
from fairnr.modules.module_utils import (
FCBlock, Linear, Embedding,
InvertableMapping
)
MAX_DEPTH = 10000.0
ENCODER_REGISTRY = {}
def register_encoder(name):
def register_encoder_cls(cls):
if name in ENCODER_REGISTRY:
raise ValueError('Cannot register duplicate module ({})'.format(name))
ENCODER_REGISTRY[name] = cls
return cls
return register_encoder_cls
def get_encoder(name):
if name not in ENCODER_REGISTRY:
raise ValueError('Cannot find module {}'.format(name))
return ENCODER_REGISTRY[name]
@register_encoder('abstract_encoder')
class Encoder(nn.Module):
"""
backbone network
"""
def __init__(self, args):
super().__init__()
self.args = args
def forward(self, **kwargs):
raise NotImplementedError
@staticmethod
def add_args(parser):
pass
@register_encoder('volume_encoder')
class VolumeEncoder(Encoder):
def __init__(self, args):
super().__init__(args)
self.context = None
@staticmethod
def add_args(parser):
parser.add_argument('--near', type=float, help='near distance of the volume')
parser.add_argument('--far', type=float, help='far distance of the volume')
def precompute(self, id=None, context=None, *args, **kwargs):
        self.context = context  # save context, which may be useful later
return {} # we do not use encoder for NeRF
def ray_intersect(self, ray_start, ray_dir, encoder_states, near=None, far=None):
S, V, P, _ = ray_dir.size()
ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()
near = near if near is not None else self.args.near
far = far if far is not None else self.args.far
intersection_outputs = {
"min_depth": ray_dir.new_ones(S, V * P, 1) * near,
"max_depth": ray_dir.new_ones(S, V * P, 1) * far,
"probs": ray_dir.new_ones(S, V * P, 1),
"steps": ray_dir.new_ones(S, V * P) * self.args.fixed_num_samples,
"intersected_voxel_idx": ray_dir.new_zeros(S, V * P, 1).int()}
hits = ray_dir.new_ones(S, V * P).bool()
return ray_start, ray_dir, intersection_outputs, hits
def ray_sample(self, intersection_outputs):
sampled_idx, sampled_depth, sampled_dists = inverse_cdf_sampling(
intersection_outputs['intersected_voxel_idx'],
intersection_outputs['min_depth'],
intersection_outputs['max_depth'],
intersection_outputs['probs'],
intersection_outputs['steps'], -1, (not self.training))
return {
'sampled_point_depth': sampled_depth,
'sampled_point_distance': sampled_dists,
'sampled_point_voxel_idx': sampled_idx, # dummy index (to match raymarcher)
}
def forward(self, samples, encoder_states):
inputs = {
'pos': samples['sampled_point_xyz'].requires_grad_(True),
'ray': samples['sampled_point_ray_direction'],
'dists': samples['sampled_point_distance']
}
if self.context is not None:
inputs.update({'context': self.context})
return inputs
@register_encoder('infinite_volume_encoder')
class InfiniteVolumeEncoder(VolumeEncoder):
def __init__(self, args):
super().__init__(args)
self.imap = InvertableMapping(style='simple')
self.nofixdz = getattr(args, "no_fix_dz", False)
self.sample_msi = getattr(args, "sample_msi", False)
@staticmethod
def add_args(parser):
VolumeEncoder.add_args(parser)
parser.add_argument('--no-fix-dz', action='store_true', help='do not fix dz.')
parser.add_argument('--sample-msi', action='store_true')
def ray_intersect(self, ray_start, ray_dir, encoder_states):
S, V, P, _ = ray_dir.size()
ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()
        # ray-sphere (unit) intersection (assuming all cameras are inside the sphere):
p_v = (ray_start * ray_dir).sum(-1)
p_p = (ray_start * ray_start).sum(-1)
d_u = -p_v + torch.sqrt(p_v ** 2 - p_p + 1)
intersection_outputs = {
"min_depth": torch.arange(-1, 1, 1, dtype=ray_dir.dtype, device=ray_dir.device)[None, None, :].expand(S, V * P, 2),
"max_depth": torch.arange( 0, 2, 1, dtype=ray_dir.dtype, device=ray_dir.device)[None, None, :].expand(S, V * P, 2),
"probs": ray_dir.new_ones(S, V * P, 2) * .5,
"steps": ray_dir.new_ones(S, V * P, 1) * self.args.fixed_num_samples,
"intersected_voxel_idx": torch.arange( 0, 2, 1, device=ray_dir.device)[None, None, :].expand(S, V * P, 2).int(),
"unit_sphere_depth": d_u,
"p_v": p_v, "p_p": p_p}
hits = ray_dir.new_ones(S, V * P).bool()
return ray_start, ray_dir, intersection_outputs, hits
def ray_sample(self, intersection_outputs):
samples = super().ray_sample(intersection_outputs) # HACK: < 1, unit sphere; > 1, outside the sphere
# map from (0, 1) to (0, +inf) with invertable mapping
samples['original_point_distance'] = samples['sampled_point_distance'].clone()
samples['original_point_depth'] = samples['sampled_point_depth'].clone()
# assign correct depth
in_depth = intersection_outputs['unit_sphere_depth'][:, None] * (
samples['original_point_depth'].clamp(max=0.0) + 1.0).masked_fill(samples['sampled_point_voxel_idx'].ne(0), 0)
if not self.sample_msi:
out_depth = (intersection_outputs['unit_sphere_depth'][:, None] + 1 / (1 - samples['original_point_depth'].clamp(min=0.0) + 1e-7) - 1
).masked_fill(samples['sampled_point_voxel_idx'].ne(1), 0)
else:
p_v, p_p = intersection_outputs['p_v'][:, None], intersection_outputs['p_p'][:, None]
out_depth = (-p_v + torch.sqrt(p_v ** 2 - p_p + 1. / (1. - samples['original_point_depth'].clamp(min=0.0) + 1e-7) ** 2)
).masked_fill(samples['sampled_point_voxel_idx'].ne(1), 0)
samples['sampled_point_depth'] = in_depth + out_depth
if not self.nofixdz:
# raise NotImplementedError("need to re-compute later")
in_dists = 1 / intersection_outputs['unit_sphere_depth'][:, None] * (samples['original_point_distance']).masked_fill(
samples['sampled_point_voxel_idx'].ne(0), 0)
alpha = 1. if not self.sample_msi else 1. / torch.sqrt(1. + (p_v ** 2 - p_p) * (1. - samples['original_point_depth'].clamp(min=0.0) + 1e-7) ** 2)
out_dists = alpha / ((1 - samples['original_point_depth'].clamp(min=0.0)) ** 2 + 1e-7) * (samples['original_point_distance']).masked_fill(
samples['sampled_point_voxel_idx'].ne(1), 0)
samples['sampled_point_distance'] = in_dists + out_dists
else:
samples['sampled_point_distance'] = samples['sampled_point_distance'].scatter(1,
samples['sampled_point_voxel_idx'].ne(-1).sum(-1, keepdim=True) - 1, 1e8)
return samples
def forward(self, samples, encoder_states):
field_inputs = super().forward(samples, encoder_states)
r = field_inputs['pos'].norm(p=2, dim=-1, keepdim=True) # .clamp(min=1.0)
field_inputs['pos'] = torch.cat([field_inputs['pos'] / (r + 1e-8), r / (1.0 + r)], dim=-1)
return field_inputs
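# --- Illustrative sketch (not part of the original NSVF code) ---------------
# InfiniteVolumeEncoder.ray_intersect uses the closed-form solution of
# ||o + t * d||^2 = 1 for a unit direction d and an origin o inside the unit
# sphere: t = -<o, d> + sqrt(<o, d>^2 - <o, o> + 1). A quick numerical check:
def _check_unit_sphere_intersection():
    o = torch.tensor([0.1, -0.2, 0.05])                      # origin inside the sphere
    d = F.normalize(torch.tensor([0.3, 0.7, -0.2]), dim=0)   # unit ray direction
    p_v, p_p = (o * d).sum(), (o * o).sum()
    t = -p_v + torch.sqrt(p_v ** 2 - p_p + 1.0)
    return torch.allclose((o + t * d).norm(), torch.tensor(1.0), atol=1e-5)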
@register_encoder('sparsevoxel_encoder')
class SparseVoxelEncoder(Encoder):
def __init__(self, args, voxel_path=None, bbox_path=None, shared_values=None):
super().__init__(args)
# read initial voxels or learned sparse voxels
self.voxel_path = voxel_path if voxel_path is not None else args.voxel_path
self.bbox_path = bbox_path if bbox_path is not None else getattr(args, "initial_boundingbox", None)
assert (self.bbox_path is not None) or (self.voxel_path is not None), \
"at least initial bounding box or pretrained voxel files are required."
self.voxel_index = None
self.scene_scale = getattr(args, "scene_scale", 1.0)
if self.voxel_path is not None:
# read voxel file
assert os.path.exists(self.voxel_path), "voxel file must exist"
if Path(self.voxel_path).suffix == '.ply':
from plyfile import PlyData, PlyElement
plyvoxel = PlyData.read(self.voxel_path)
elements = [x.name for x in plyvoxel.elements]
assert 'vertex' in elements
plydata = plyvoxel['vertex']
fine_points = torch.from_numpy(
np.stack([plydata['x'], plydata['y'], plydata['z']]).astype('float32').T)
if 'face' in elements:
# read voxel meshes... automatically detect voxel size
faces = plyvoxel['face']['vertex_indices']
t = fine_points[faces[0].astype('int64')]
voxel_size = torch.abs(t[0] - t[1]).max()
# indexing voxel vertices
fine_points = torch.unique(fine_points, dim=0)
# vertex_ids, _ = discretize_points(fine_points, voxel_size)
# vertex_ids_offset = vertex_ids + 1
# # simple hashing
# vertex_ids = vertex_ids[:, 0] * 1000000 + vertex_ids[:, 1] * 1000 + vertex_ids[:, 2]
# vertex_ids_offset = vertex_ids_offset[:, 0] * 1000000 + vertex_ids_offset[:, 1] * 1000 + vertex_ids_offset[:, 2]
# vertex_ids = {k: True for k in vertex_ids.tolist()}
# vertex_inside = [v in vertex_ids for v in vertex_ids_offset.tolist()]
# # get voxel centers
# fine_points = fine_points[torch.tensor(vertex_inside)] + voxel_size * .5
# fine_points = fine_points + voxel_size * .5 --> use all corners as centers
else:
# voxel size must be provided
assert getattr(args, "voxel_size", None) is not None, "final voxel size is essential."
voxel_size = args.voxel_size
if 'quality' in elements:
self.voxel_index = torch.from_numpy(plydata['quality']).long()
else:
# supporting the old style .txt voxel points
fine_points = torch.from_numpy(np.loadtxt(self.voxel_path)[:, 3:].astype('float32'))
else:
# read bounding-box file
bbox = np.loadtxt(self.bbox_path)
voxel_size = bbox[-1] if getattr(args, "voxel_size", None) is None else args.voxel_size
fine_points = torch.from_numpy(bbox2voxels(bbox[:6], voxel_size))
half_voxel = voxel_size * .5
# transform from voxel centers to voxel corners (key/values)
fine_coords, _ = discretize_points(fine_points, half_voxel)
fine_keys0 = offset_points(fine_coords, 1.0).reshape(-1, 3)
fine_keys, fine_feats = torch.unique(fine_keys0, dim=0, sorted=True, return_inverse=True)
fine_feats = fine_feats.reshape(-1, 8)
num_keys = torch.scalar_tensor(fine_keys.size(0)).long()
# ray-marching step size
if getattr(args, "raymarching_stepsize_ratio", 0) > 0:
step_size = args.raymarching_stepsize_ratio * voxel_size
else:
step_size = args.raymarching_stepsize
# register parameters (will be saved to checkpoints)
self.register_buffer("points", fine_points) # voxel centers
self.register_buffer("keys", fine_keys.long()) # id used to find voxel corners/embeddings
self.register_buffer("feats", fine_feats.long()) # for each voxel, 8 voxel corner ids
self.register_buffer("num_keys", num_keys)
self.register_buffer("keep", fine_feats.new_ones(fine_feats.size(0)).long()) # whether the voxel will be pruned
self.register_buffer("voxel_size", torch.scalar_tensor(voxel_size))
self.register_buffer("step_size", torch.scalar_tensor(step_size))
self.register_buffer("max_hits", torch.scalar_tensor(args.max_hits))
logger.info("loaded {} voxel centers, {} voxel corners".format(fine_points.size(0), num_keys))
# set-up other hyperparameters and initialize running time caches
self.embed_dim = getattr(args, "voxel_embed_dim", None)
self.deterministic_step = getattr(args, "deterministic_step", False)
self.use_octree = getattr(args, "use_octree", False)
self.track_max_probs = getattr(args, "track_max_probs", False)
self._runtime_caches = {
"flatten_centers": None,
"flatten_children": None,
"max_voxel_probs": None
}
# sparse voxel embeddings
if shared_values is None and self.embed_dim > 0:
self.values = Embedding(num_keys, self.embed_dim, None)
else:
self.values = shared_values
def upgrade_state_dict_named(self, state_dict, name):
# update the voxel embedding shapes
if self.values is not None:
loaded_values = state_dict[name + '.values.weight']
self.values.weight = nn.Parameter(self.values.weight.new_zeros(*loaded_values.size()))
self.values.num_embeddings = self.values.weight.size(0)
self.total_size = self.values.weight.size(0)
self.num_keys = self.num_keys * 0 + self.total_size
if self.voxel_index is not None:
state_dict[name + '.points'] = state_dict[name + '.points'][self.voxel_index]
state_dict[name + '.feats'] = state_dict[name + '.feats'][self.voxel_index]
state_dict[name + '.keep'] = state_dict[name + '.keep'][self.voxel_index]
# update the buffers shapes
if name + '.points' in state_dict:
self.points = self.points.new_zeros(*state_dict[name + '.points'].size())
self.feats = self.feats.new_zeros(*state_dict[name + '.feats'].size())
self.keys = self.keys.new_zeros(*state_dict[name + '.keys'].size())
self.keep = self.keep.new_zeros(*state_dict[name + '.keep'].size())
else:
# this usually happens when loading a NeRF checkpoint to NSVF
# use initialized values
state_dict[name + '.points'] = self.points
state_dict[name + '.feats'] = self.feats
state_dict[name + '.keys'] = self.keys
state_dict[name + '.keep'] = self.keep
state_dict[name + '.voxel_size'] = self.voxel_size
state_dict[name + '.step_size'] = self.step_size
state_dict[name + '.max_hits'] = self.max_hits
state_dict[name + '.num_keys'] = self.num_keys
@staticmethod
def add_args(parser):
parser.add_argument('--initial-boundingbox', type=str, help='the initial bounding box to initialize the model')
        parser.add_argument('--voxel-size', type=float, metavar='D', help='initial voxel size of the input points')
        parser.add_argument('--voxel-path', type=str, help='path to a pretrained voxel file; if provided, it is loaded as-is instead of building voxels from the initial bounding box')
parser.add_argument('--voxel-embed-dim', type=int, metavar='N', help="embedding size")
parser.add_argument('--deterministic-step', action='store_true',
help='if set, the model runs fixed stepsize, instead of sampling one')
parser.add_argument('--max-hits', type=int, metavar='N', help='due to restrictions we set a maximum number of hits')
parser.add_argument('--raymarching-stepsize', type=float, metavar='D',
help='ray marching step size for sparse voxels')
parser.add_argument('--raymarching-stepsize-ratio', type=float, metavar='D',
help='if the concrete step size is not given (=0), we use the ratio to the voxel size as step size.')
parser.add_argument('--use-octree', action='store_true', help='if set, instead of looping over the voxels, we build an octree.')
parser.add_argument('--track-max-probs', action='store_true', help='if set, tracking the maximum probability in ray-marching.')
parser.add_argument('--scene-scale', type=float, default=1.0)
def reset_runtime_caches(self):
logger.info("reset chache")
if self.use_octree:
points = self.points[self.keep.bool()]
centers, children = build_easy_octree(points, self.voxel_size / 2.0)
self._runtime_caches['flatten_centers'] = centers
self._runtime_caches['flatten_children'] = children
if self.track_max_probs:
self._runtime_caches['max_voxel_probs'] = self.points.new_zeros(self.points.size(0))
def clean_runtime_caches(self):
logger.info("clean chache")
for name in self._runtime_caches:
self._runtime_caches[name] = None
def precompute(self, id=None, *args, **kwargs):
feats = self.feats[self.keep.bool()]
points = self.points[self.keep.bool()]
points[:, 0] += (self.voxel_size / 10)
values = self.values.weight[: self.num_keys] if self.values is not None else None
if id is not None:
# extend size to support multi-objects
feats = feats.unsqueeze(0).expand(id.size(0), *feats.size()).contiguous()
points = points.unsqueeze(0).expand(id.size(0), *points.size()).contiguous()
values = values.unsqueeze(0).expand(id.size(0), *values.size()).contiguous() if values is not None else None
# moving to multiple objects
if id.size(0) > 1:
feats = feats + self.num_keys * torch.arange(id.size(0),
device=feats.device, dtype=feats.dtype)[:, None, None]
encoder_states = {
'voxel_vertex_idx': feats,
'voxel_center_xyz': points,
'voxel_vertex_emb': values
}
if self.use_octree:
flatten_centers, flatten_children = self.flatten_centers.clone(), self.flatten_children.clone()
if id is not None:
flatten_centers = flatten_centers.unsqueeze(0).expand(id.size(0), *flatten_centers.size()).contiguous()
flatten_children = flatten_children.unsqueeze(0).expand(id.size(0), *flatten_children.size()).contiguous()
encoder_states['voxel_octree_center_xyz'] = flatten_centers
encoder_states['voxel_octree_children_idx'] = flatten_children
return encoder_states
@torch.no_grad()
def export_voxels(self, return_mesh=False):
logger.info("exporting learned sparse voxels...")
voxel_idx = torch.arange(self.keep.size(0), device=self.keep.device)
voxel_idx = voxel_idx[self.keep.bool()]
voxel_pts = self.points[self.keep.bool()]
if not return_mesh:
            # HACK: we export the original voxel indices as "quality", in case they are needed for editing
points = [
(voxel_pts[k, 0], voxel_pts[k, 1], voxel_pts[k, 2], voxel_idx[k])
for k in range(voxel_idx.size(0))
]
vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('quality', 'f4')])
return PlyData([PlyElement.describe(vertex, 'vertex')])
else:
# generate polygon for voxels
center_coords, residual = discretize_points(voxel_pts, self.voxel_size / 2)
offsets = torch.tensor([[-1,-1,-1],[-1,-1,1],[-1,1,-1],[1,-1,-1],[1,1,-1],[1,-1,1],[-1,1,1],[1,1,1]], device=center_coords.device)
vertex_coords = center_coords[:, None, :] + offsets[None, :, :]
vertex_points = vertex_coords.type_as(residual) * self.voxel_size / 2 + residual
faceidxs = [[1,6,7,5],[7,6,2,4],[5,7,4,3],[1,0,2,6],[1,5,3,0],[0,3,4,2]]
all_vertex_keys, all_vertex_idxs = {}, []
for i in range(vertex_coords.shape[0]):
for j in range(8):
key = " ".join(["{}".format(int(p)) for p in vertex_coords[i,j]])
if key not in all_vertex_keys:
all_vertex_keys[key] = vertex_points[i,j]
all_vertex_idxs += [key]
all_vertex_dicts = {key: u for u, key in enumerate(all_vertex_idxs)}
all_faces = torch.stack([torch.stack([vertex_coords[:, k] for k in f]) for f in faceidxs]).permute(2,0,1,3).reshape(-1,4,3)
all_faces_keys = {}
for l in range(all_faces.size(0)):
key = " ".join(["{}".format(int(p)) for p in all_faces[l].sum(0) // 4])
if key not in all_faces_keys:
all_faces_keys[key] = all_faces[l]
vertex = np.array([tuple(all_vertex_keys[key].cpu().tolist()) for key in all_vertex_idxs], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
face = np.array([([all_vertex_dicts["{} {} {}".format(*b)] for b in a.cpu().tolist()],) for a in all_faces_keys.values()],
dtype=[('vertex_indices', 'i4', (4,))])
return PlyData([PlyElement.describe(vertex, 'vertex'), PlyElement.describe(face, 'face')])
@torch.no_grad()
def export_surfaces(self, field_fn, th, bits):
"""
extract triangle-meshes from the implicit field using marching cube algorithm
Lewiner, Thomas, et al. "Efficient implementation of marching cubes' cases with topological guarantees."
Journal of graphics tools 8.2 (2003): 1-15.
"""
logger.info("marching cube...")
encoder_states = self.precompute(id=None)
points = encoder_states['voxel_center_xyz']
scores = self.get_scores(field_fn, th=th, bits=bits, encoder_states=encoder_states)
coords, residual = discretize_points(points, self.voxel_size)
A, B, C = [s + 1 for s in coords.max(0).values.cpu().tolist()]
# prepare grids
full_grids = points.new_ones(A * B * C, bits ** 3)
full_grids[coords[:, 0] * B * C + coords[:, 1] * C + coords[:, 2]] = scores
full_grids = full_grids.reshape(A, B, C, bits, bits, bits)
full_grids = full_grids.permute(0, 3, 1, 4, 2, 5).reshape(A * bits, B * bits, C * bits)
full_grids = 1 - full_grids
# marching cube
from skimage import measure
space_step = self.voxel_size.item() / bits
verts, faces, normals, _ = measure.marching_cubes_lewiner(
volume=full_grids.cpu().numpy(), level=0.5,
spacing=(space_step, space_step, space_step)
)
verts += (residual - (self.voxel_size / 2)).cpu().numpy()
verts = np.array([tuple(a) for a in verts.tolist()], dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
faces = np.array([(a, ) for a in faces.tolist()], dtype=[('vertex_indices', 'i4', (3,))])
return PlyData([PlyElement.describe(verts, 'vertex'), PlyElement.describe(faces, 'face')])
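    # In export_surfaces above, every kept voxel contributes a bits^3 block of
    # transparency scores exp(-relu(sigma)); the blocks are scattered into a dense
    # (A*bits, B*bits, C*bits) grid (empty space defaults to 1, i.e. fully
    # transparent), the grid is inverted to 1 - score, and marching cubes extracts
    # the 0.5 iso-surface.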
def get_edge(self, ray_start, ray_dir, samples, encoder_states):
outs = get_edge(
ray_start + ray_dir * samples['sampled_point_depth'][:, :1],
encoder_states['voxel_center_xyz'].reshape(-1, 3)[samples['sampled_point_voxel_idx'][:, 0].long()],
self.voxel_size).type_as(ray_dir) # get voxel edges/depth (for visualization)
outs = (1 - outs[:, None].expand(outs.size(0), 3)) * 0.7
return outs
def ray_intersect(self, ray_start, ray_dir, encoder_states):
point_feats = encoder_states['voxel_vertex_idx']
point_xyz = encoder_states['voxel_center_xyz']
S, V, P, _ = ray_dir.size()
_, H, D = point_feats.size()
# ray-voxel intersection
ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()
if self.use_octree: # ray-voxel intersection with SVO
flatten_centers = encoder_states['voxel_octree_center_xyz']
flatten_children = encoder_states['voxel_octree_children_idx']
pts_idx, min_depth, max_depth = svo_ray_intersect(
self.voxel_size, self.max_hits, flatten_centers, flatten_children,
ray_start, ray_dir)
else: # ray-voxel intersection with all voxels
pts_idx, min_depth, max_depth = aabb_ray_intersect(
self.voxel_size, self.max_hits, point_xyz, ray_start, ray_dir)
# sort the depths
min_depth.masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
max_depth.masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
min_depth, sorted_idx = min_depth.sort(dim=-1)
max_depth = max_depth.gather(-1, sorted_idx)
pts_idx = pts_idx.gather(-1, sorted_idx)
hits = pts_idx.ne(-1).any(-1) # remove all points that completely miss the object
if S > 1: # extend the point-index to multiple shapes (just in case)
pts_idx = (pts_idx + H * torch.arange(S,
device=pts_idx.device, dtype=pts_idx.dtype)[:, None, None]
).masked_fill_(pts_idx.eq(-1), -1)
intersection_outputs = {
"min_depth": min_depth,
"max_depth": max_depth,
"intersected_voxel_idx": pts_idx
}
return ray_start, ray_dir, intersection_outputs, hits
def ray_sample(self, intersection_outputs):
# sample points and use middle point approximation
sampled_idx, sampled_depth, sampled_dists = inverse_cdf_sampling(
intersection_outputs['intersected_voxel_idx'],
intersection_outputs['min_depth'],
intersection_outputs['max_depth'],
intersection_outputs['probs'],
intersection_outputs['steps'],
-1, self.deterministic_step or (not self.training))
sampled_dists = sampled_dists.clamp(min=0.0)
sampled_depth.masked_fill_(sampled_idx.eq(-1), MAX_DEPTH)
sampled_dists.masked_fill_(sampled_idx.eq(-1), 0.0)
samples = {
'sampled_point_depth': sampled_depth,
'sampled_point_distance': sampled_dists,
'sampled_point_voxel_idx': sampled_idx,
}
return samples
@torch.enable_grad()
def forward(self, samples, encoder_states):
# encoder states
point_feats = encoder_states['voxel_vertex_idx']
point_xyz = encoder_states['voxel_center_xyz']
values = encoder_states['voxel_vertex_emb']
# ray point samples
sampled_idx = samples['sampled_point_voxel_idx'].long()
sampled_xyz = samples['sampled_point_xyz'].requires_grad_(True)
sampled_dir = samples['sampled_point_ray_direction']
sampled_dis = samples['sampled_point_distance']
# prepare inputs for implicit field
# / self.scene_scale
inputs = {
'pos': sampled_xyz,
'ray': sampled_dir,
'dists': sampled_dis}
# --- just for debugging ---- #
# r = inputs['pos'].norm(p=2, dim=-1, keepdim=True)
# inputs['pos'] = torch.cat([inputs['pos'] / (r + 1e-8), r / (1 + r)], dim=-1)
if values is not None:
# resample point features
point_xyz = F.embedding(sampled_idx, point_xyz)
point_feats = F.embedding(F.embedding(sampled_idx, point_feats), values).view(point_xyz.size(0), -1)
# tri-linear interpolation
p = ((sampled_xyz - point_xyz) / self.voxel_size + .5).unsqueeze(1)
q = offset_points(p, .5, offset_only=True).unsqueeze(0) + .5 # BUG (FIX)
inputs.update({'emb': trilinear_interp(p, q, point_feats)})
return inputs
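    # The feature lookup above follows standard trilinear interpolation: p in
    # [0, 1]^3 is the sample's position relative to its voxel, q enumerates the 8
    # corner offsets, and each corner embedding is weighted by
    # prod_k (p_k if q_k == 1 else 1 - p_k) before summation
    # (see fairnr.data.geometry.trilinear_interp).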
@torch.no_grad()
def track_voxel_probs(self, voxel_idxs, voxel_probs):
voxel_idxs = voxel_idxs.masked_fill(voxel_idxs.eq(-1), self.max_voxel_probs.size(0))
chunk_size = 4096
for start in range(0, voxel_idxs.size(0), chunk_size):
end = start + chunk_size
end = end if end < voxel_idxs.size(0) else voxel_idxs.size(0)
max_voxel_probs = self.max_voxel_probs.new_zeros(end-start, self.max_voxel_probs.size(0) + 1).scatter_add_(
dim=-1, index=voxel_idxs[start:end], src=voxel_probs[start:end]).max(0)[0][:-1].data
self.max_voxel_probs = torch.max(self.max_voxel_probs, max_voxel_probs)
@torch.no_grad()
def pruning(self, field_fn, th=0.5, encoder_states=None, train_stats=False):
if not train_stats:
logger.info("pruning...")
scores = self.get_scores(field_fn, th=th, bits=16, encoder_states=encoder_states)
keep = (1 - scores.min(-1)[0]) > th
else:
logger.info("pruning based on training set statics (e.g. probs)...")
if dist.is_initialized() and dist.get_world_size() > 1: # sync on multi-gpus
dist.all_reduce(self.max_voxel_probs, op=dist.ReduceOp.MAX)
keep = self.max_voxel_probs > th
self.keep.masked_scatter_(self.keep.bool(), keep.long())
logger.info("pruning done. # of voxels before: {}, after: {} voxels".format(keep.size(0), keep.sum()))
def get_scores(self, field_fn, th=0.5, bits=16, encoder_states=None):
if encoder_states is None:
encoder_states = self.precompute(id=None)
feats = encoder_states['voxel_vertex_idx']
points = encoder_states['voxel_center_xyz']
values = encoder_states['voxel_vertex_emb']
chunk_size = 64
def get_scores_once(feats, points, values):
# sample points inside voxels
sampled_xyz = offset_points(points, self.voxel_size / 2.0, bits=bits)
sampled_idx = torch.arange(points.size(0), device=points.device)[:, None].expand(*sampled_xyz.size()[:2])
sampled_xyz, sampled_idx = sampled_xyz.reshape(-1, 3), sampled_idx.reshape(-1)
field_inputs = self.forward(
{'sampled_point_xyz': sampled_xyz,
'sampled_point_voxel_idx': sampled_idx,
'sampled_point_ray_direction': None,
'sampled_point_distance': None},
{'voxel_vertex_idx': feats,
'voxel_center_xyz': points,
'voxel_vertex_emb': values}) # get field inputs
if encoder_states.get('context', None) is not None:
field_inputs['context'] = encoder_states['context']
# evaluation with density
field_outputs = field_fn(field_inputs, outputs=['sigma'])
free_energy = -torch.relu(field_outputs['sigma']).reshape(-1, bits ** 3)
# return scores
return torch.exp(free_energy)
return torch.cat([get_scores_once(feats[i: i + chunk_size], points[i: i + chunk_size], values)
for i in range(0, points.size(0), chunk_size)], 0)
@torch.no_grad()
def splitting(self):
logger.info("splitting...")
encoder_states = self.precompute(id=None)
feats, points, values = encoder_states['voxel_vertex_idx'], encoder_states['voxel_center_xyz'], encoder_states['voxel_vertex_emb']
new_points, new_feats, new_values, new_keys = splitting_points(points, feats, values, self.voxel_size / 2.0)
new_num_keys = new_keys.size(0)
new_point_length = new_points.size(0)
# set new voxel embeddings
if new_values is not None:
self.values.weight = nn.Parameter(new_values)
self.values.num_embeddings = self.values.weight.size(0)
self.total_size = new_num_keys
self.num_keys = self.num_keys * 0 + self.total_size
self.points = new_points
self.feats = new_feats
self.keep = self.keep.new_ones(new_point_length)
logger.info("splitting done. # of voxels before: {}, after: {} voxels".format(points.size(0), self.keep.sum()))
@property
def flatten_centers(self):
if self._runtime_caches['flatten_centers'] is None:
self.reset_runtime_caches()
return self._runtime_caches['flatten_centers']
@property
def flatten_children(self):
if self._runtime_caches['flatten_children'] is None:
self.reset_runtime_caches()
return self._runtime_caches['flatten_children']
@property
def max_voxel_probs(self):
if self._runtime_caches['max_voxel_probs'] is None:
self.reset_runtime_caches()
return self._runtime_caches['max_voxel_probs']
@max_voxel_probs.setter
def max_voxel_probs(self, x):
self._runtime_caches['max_voxel_probs'] = x
@property
def feature_dim(self):
return self.embed_dim
@property
def dummy_loss(self):
if self.values is not None:
return self.values.weight[0,0] * 0.0
return 0.0
@property
def num_voxels(self):
return self.keep.long().sum()
@register_encoder('multi_sparsevoxel_encoder')
class MultiSparseVoxelEncoder(Encoder):
def __init__(self, args):
super().__init__(args)
try:
self.all_voxels = nn.ModuleList(
[SparseVoxelEncoder(args, vox.strip()) for vox in open(args.voxel_path).readlines()])
except TypeError:
bbox_path = getattr(args, "bbox_path", "/private/home/jgu/data/shapenet/disco_dataset/bunny_point.txt")
self.all_voxels = nn.ModuleList(
[SparseVoxelEncoder(args, None, g.strip() + '/bbox.txt') for g in open(bbox_path).readlines()])
# properties
self.deterministic_step = getattr(args, "deterministic_step", False)
self.use_octree = getattr(args, "use_octree", False)
self.track_max_probs = getattr(args, "track_max_probs", False)
self.cid = None
if getattr(self.args, "global_embeddings", None) is not None:
self.global_embed = torch.zeros(*eval(self.args.global_embeddings)).normal_(mean=0, std=0.01)
self.global_embed = nn.Parameter(self.global_embed, requires_grad=True)
else:
self.global_embed = None
@staticmethod
def add_args(parser):
SparseVoxelEncoder.add_args(parser)
parser.add_argument('--bbox-path', type=str, default=None)
parser.add_argument('--global-embeddings', type=str, default=None,
help="""set global embeddings if provided in global.txt. We follow this format:
(N, D) or (K, N, D) if we have multi-dimensional global features.
                                D is the global feature dimension.
N is the number of indices of this feature,
and K is the number of features if provided.""")
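    # Example (illustrative): --global-embeddings "(2, 100, 64)" creates K=2 global
    # feature tables, each holding N=100 learnable D=64 vectors; precompute() then
    # looks up one row per table with `global_index` and stacks them as the
    # per-object context.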
def reset_runtime_caches(self):
for id in range(len(self.all_voxels)):
self.all_voxels[id].reset_runtime_caches()
def clean_runtime_caches(self):
for id in range(len(self.all_voxels)):
self.all_voxels[id].clean_runtime_caches()
def precompute(self, id, global_index=None, *args, **kwargs):
# TODO: this is a HACK for simplicity
assert id.size(0) == 1, "for now, only works for one object"
# HACK
# id = id * 0 + 2
self.cid = id[0]
encoder_states = self.all_voxels[id[0]].precompute(id, *args, **kwargs)
if (global_index is not None) and (self.global_embed is not None):
encoder_states['context'] = torch.stack([
F.embedding(global_index[:, i], self.global_embed[i])
for i in range(self.global_embed.size(0))], 1)
return encoder_states
def export_surfaces(self, field_fn, th, bits):
raise NotImplementedError("does not support for now.")
def export_voxels(self, return_mesh=False):
raise NotImplementedError("does not support for now.")
def get_edge(self, *args, **kwargs):
return self.all_voxels[self.cid].get_edge(*args, **kwargs)
def ray_intersect(self, *args, **kwargs):
return self.all_voxels[self.cid].ray_intersect(*args, **kwargs)
def ray_sample(self, *args, **kwargs):
return self.all_voxels[self.cid].ray_sample(*args, **kwargs)
def forward(self, samples, encoder_states):
inputs = self.all_voxels[self.cid].forward(samples, encoder_states)
if encoder_states.get('context', None) is not None:
inputs['context'] = encoder_states['context']
return inputs
def track_voxel_probs(self, voxel_idxs, voxel_probs):
return self.all_voxels[self.cid].track_voxel_probs(voxel_idxs, voxel_probs)
@torch.no_grad()
def pruning(self, field_fn, th=0.5, train_stats=False):
for id in range(len(self.all_voxels)):
self.all_voxels[id].pruning(field_fn, th, train_stats=train_stats)
@torch.no_grad()
def splitting(self):
for id in range(len(self.all_voxels)):
self.all_voxels[id].splitting()
@property
def feature_dim(self):
return self.all_voxels[0].embed_dim
@property
def dummy_loss(self):
return sum([d.dummy_loss for d in self.all_voxels])
@property
def voxel_size(self):
return self.all_voxels[0].voxel_size
@voxel_size.setter
def voxel_size(self, x):
for id in range(len(self.all_voxels)):
self.all_voxels[id].voxel_size = x
@property
def step_size(self):
return self.all_voxels[0].step_size
@step_size.setter
def step_size(self, x):
for id in range(len(self.all_voxels)):
self.all_voxels[id].step_size = x
@property
def max_hits(self):
return self.all_voxels[0].max_hits
@max_hits.setter
def max_hits(self, x):
for id in range(len(self.all_voxels)):
self.all_voxels[id].max_hits = x
@property
def num_voxels(self):
return self.all_voxels[self.cid].num_voxels
@register_encoder('shared_sparsevoxel_encoder')
class SharedSparseVoxelEncoder(MultiSparseVoxelEncoder):
"""
    Unlike MultiSparseVoxelEncoder, we assume a single list of voxels
    shared across all models. This is typically useful for learning a video sequence.
"""
def __init__(self, args):
super(MultiSparseVoxelEncoder, self).__init__(args)
# using a shared voxel
self.voxel_path = args.voxel_path
self.num_frames = args.num_frames
self.all_voxels = [SparseVoxelEncoder(args, self.voxel_path)]
self.all_voxels = nn.ModuleList(self.all_voxels + [
SparseVoxelEncoder(args, self.voxel_path, shared_values=self.all_voxels[0].values)
for i in range(self.num_frames - 1)])
self.context_embed_dim = args.context_embed_dim
self.contexts = nn.Embedding(self.num_frames, self.context_embed_dim, None)
self.cid = None
@staticmethod
def add_args(parser):
SparseVoxelEncoder.add_args(parser)
parser.add_argument('--num-frames', type=int, help='the total number of frames')
parser.add_argument('--context-embed-dim', type=int, help='context embedding for each view')
def forward(self, samples, encoder_states):
inputs = self.all_voxels[self.cid].forward(samples, encoder_states)
inputs.update({'context': self.contexts(self.cid).unsqueeze(0)})
return inputs
@torch.no_grad()
def pruning(self, field_fn, th=0.5, train_stats=False):
for cid in range(len(self.all_voxels)):
id = torch.tensor([cid], device=self.contexts.weight.device)
encoder_states = {name: v[0] if v is not None else v
for name, v in self.precompute(id).items()}
encoder_states['context'] = self.contexts(id)
self.all_voxels[cid].pruning(field_fn, th,
encoder_states=encoder_states,
train_stats=train_stats)
@torch.no_grad()
def splitting(self):
logger.info("splitting...")
all_feats, all_points = [], []
for id in range(len(self.all_voxels)):
encoder_states = self.all_voxels[id].precompute(id=None)
feats = encoder_states['voxel_vertex_idx']
points = encoder_states['voxel_center_xyz']
values = encoder_states['voxel_vertex_emb']
all_feats.append(feats)
all_points.append(points)
feats, points = torch.cat(all_feats, 0), torch.cat(all_points, 0)
unique_feats, unique_idx = torch.unique(feats, dim=0, return_inverse=True)
unique_points = points[
unique_feats.new_zeros(unique_feats.size(0)).scatter_(
0, unique_idx, torch.arange(unique_idx.size(0), device=unique_feats.device)
)]
new_points, new_feats, new_values, new_keys = splitting_points(unique_points, unique_feats, values, self.voxel_size / 2.0)
new_num_keys = new_keys.size(0)
new_point_length = new_points.size(0)
# set new voxel embeddings (shared voxels)
if values is not None:
self.all_voxels[0].values.weight = nn.Parameter(new_values)
self.all_voxels[0].values.num_embeddings = new_num_keys
for id in range(len(self.all_voxels)):
self.all_voxels[id].total_size = new_num_keys
self.all_voxels[id].num_keys = self.all_voxels[id].num_keys * 0 + self.all_voxels[id].total_size
self.all_voxels[id].points = new_points
self.all_voxels[id].feats = new_feats
self.all_voxels[id].keep = self.all_voxels[id].keep.new_ones(new_point_length)
logger.info("splitting done. # of voxels before: {}, after: {} voxels".format(
unique_points.size(0), new_point_length))
@property
def feature_dim(self):
return self.all_voxels[0].embed_dim + self.context_embed_dim
@register_encoder('triangle_mesh_encoder')
class TriangleMeshEncoder(SparseVoxelEncoder):
"""
    Training on a fixed mesh model. Pruning is not supported.
"""
def __init__(self, args, mesh_path=None, shared_values=None):
super(SparseVoxelEncoder, self).__init__(args)
self.mesh_path = mesh_path if mesh_path is not None else args.mesh_path
assert (self.mesh_path is not None) and os.path.exists(self.mesh_path)
import open3d as o3d
mesh = o3d.io.read_triangle_mesh(self.mesh_path)
vertices = torch.from_numpy(np.asarray(mesh.vertices, dtype=np.float32))
        faces = torch.from_numpy(np.asarray(mesh.triangles, dtype=np.int64))  # np.int64: np.long is unavailable in recent NumPy
step_size = args.raymarching_stepsize
if getattr(args, "raymarching_margin", None) is None:
margin = step_size * 10 # truncated space around the triangle surfaces
else:
margin = args.raymarching_margin
self.register_buffer("margin", torch.scalar_tensor(margin))
self.register_buffer("step_size", torch.scalar_tensor(step_size))
self.register_buffer("max_hits", torch.scalar_tensor(args.max_hits))
self.vertices = nn.Parameter(vertices, requires_grad=getattr(args, "trainable_vertices", False))
self.faces = nn.Parameter(faces, requires_grad=False)
# set-up other hyperparameters
self.embed_dim = getattr(args, "voxel_embed_dim", None)
self.deterministic_step = getattr(args, "deterministic_step", False)
self.values = None
self.blur_ratio = getattr(args, "blur_ratio", 0.0)
def upgrade_state_dict_named(self, state_dict, name):
pass
@staticmethod
def add_args(parser):
parser.add_argument('--mesh-path', type=str, help='path for initial mesh file')
parser.add_argument('--voxel-embed-dim', type=int, metavar='N', help="embedding size")
parser.add_argument('--deterministic-step', action='store_true',
help='if set, the model runs fixed stepsize, instead of sampling one')
parser.add_argument('--max-hits', type=int, metavar='N', help='due to restrictions we set a maximum number of hits')
parser.add_argument('--raymarching-stepsize', type=float, metavar='D',
help='ray marching step size for sparse voxels')
parser.add_argument('--raymarching-margin', type=float, default=None,
help='margin around the surface.')
parser.add_argument('--blur-ratio', type=float, default=0,
help="it is possible to shoot outside the triangle. default=0")
parser.add_argument('--trainable-vertices', action='store_true',
                            help='if set, make the mesh vertices trainable. Experimental code; not ideal.')
def precompute(self, id=None, *args, **kwargs):
feats, points, values = self.faces, self.vertices, self.values
if id is not None:
# extend size to support multi-objects
feats = feats.unsqueeze(0).expand(id.size(0), *feats.size()).contiguous()
points = points.unsqueeze(0).expand(id.size(0), *points.size()).contiguous()
values = values.unsqueeze(0).expand(id.size(0), *values.size()).contiguous() if values is not None else None
# moving to multiple objects
if id.size(0) > 1:
feats = feats + points.size(1) * torch.arange(id.size(0),
device=feats.device, dtype=feats.dtype)[:, None, None]
encoder_states = {
'mesh_face_vertex_idx': feats,
'mesh_vertex_xyz': points,
}
return encoder_states
def get_edge(self, ray_start, ray_dir, *args, **kwargs):
return torch.ones_like(ray_dir) * 0.7
@property
def voxel_size(self):
return self.margin
def ray_intersect(self, ray_start, ray_dir, encoder_states):
point_xyz = encoder_states['mesh_vertex_xyz']
point_feats = encoder_states['mesh_face_vertex_idx']
S, V, P, _ = ray_dir.size()
F, G = point_feats.size(1), point_xyz.size(1)
# ray-voxel intersection
ray_start = ray_start.expand_as(ray_dir).contiguous().view(S, V * P, 3).contiguous()
ray_dir = ray_dir.reshape(S, V * P, 3).contiguous()
pts_idx, depth, uv = triangle_ray_intersect(
self.margin, self.blur_ratio, self.max_hits, point_xyz, point_feats, ray_start, ray_dir)
min_depth = (depth[:,:,:,0] + depth[:,:,:,1]).masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
max_depth = (depth[:,:,:,0] + depth[:,:,:,2]).masked_fill_(pts_idx.eq(-1), MAX_DEPTH)
hits = pts_idx.ne(-1).any(-1) # remove all points that completely miss the object
if S > 1: # extend the point-index to multiple shapes (just in case)
pts_idx = (pts_idx + G * torch.arange(S,
device=pts_idx.device, dtype=pts_idx.dtype)[:, None, None]
).masked_fill_(pts_idx.eq(-1), -1)
intersection_outputs = {
"min_depth": min_depth,
"max_depth": max_depth,
"intersected_voxel_idx": pts_idx
}
return ray_start, ray_dir, intersection_outputs, hits
@torch.enable_grad()
def forward(self, samples, encoder_states):
return {
'pos': samples['sampled_point_xyz'].requires_grad_(True),
'ray': samples['sampled_point_ray_direction'],
'dists': samples['sampled_point_distance']
}
@property
def num_voxels(self):
return self.vertices.size(0)
def bbox2voxels(bbox, voxel_size):
vox_min, vox_max = bbox[:3], bbox[3:]
steps = ((vox_max - vox_min) / voxel_size).round().astype('int64') + 1
x, y, z = [c.reshape(-1).astype('float32') for c in np.meshgrid(np.arange(steps[0]), np.arange(steps[1]), np.arange(steps[2]))]
x, y, z = x * voxel_size + vox_min[0], y * voxel_size + vox_min[1], z * voxel_size + vox_min[2]
return np.stack([x, y, z]).T.astype('float32')
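# A small worked example (illustrative numbers, not part of the original file):
# for bbox = np.array([0., 0., 0., 1., 1., 1.]) and voxel_size = 0.5, steps is
# [3, 3, 3], so the function enumerates the 3 x 3 x 3 = 27 grid points spanning
# the box and returns them as a float32 array of shape (27, 3).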
| 49,252 | 45.377589 | 157 | py |
NSVF | NSVF-main/fairnr/modules/hyper.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
'''
Pytorch implementations of hyper-network modules.
This code is largely adapted from
https://github.com/vsitzmann/scene-representation-networks
'''
import torch
import torch.nn as nn
import functools
from fairnr.modules.module_utils import FCBlock
def partialclass(cls, *args, **kwds):
class NewCls(cls):
__init__ = functools.partialmethod(cls.__init__, *args, **kwds)
return NewCls
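# A minimal usage sketch (illustrative, mirroring how HyperFC below pre-binds
# HyperLinear / HyperLayer): partialclass fixes some constructor arguments so the
# returned class can be instantiated later with only the remaining ones, e.g.
#   PreconfLinear = partialclass(LookupLinear, in_ch=3, out_ch=16)
#   layer = PreconfLinear(num_objects=10)   # same as LookupLinear(3, 16, 10)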
class LookupLayer(nn.Module):
def __init__(self, in_ch, out_ch, num_objects):
super().__init__()
self.out_ch = out_ch
self.lookup_lin = LookupLinear(in_ch,
out_ch,
num_objects=num_objects)
self.norm_nl = nn.Sequential(
nn.LayerNorm([self.out_ch], elementwise_affine=False),
nn.ReLU(inplace=True)
)
def forward(self, obj_idx):
net = nn.Sequential(
self.lookup_lin(obj_idx),
self.norm_nl
)
return net
class LookupFC(nn.Module):
def __init__(self,
hidden_ch,
num_hidden_layers,
num_objects,
in_ch,
out_ch,
outermost_linear=False):
super().__init__()
self.layers = nn.ModuleList()
self.layers.append(LookupLayer(in_ch=in_ch, out_ch=hidden_ch, num_objects=num_objects))
for i in range(num_hidden_layers):
self.layers.append(LookupLayer(in_ch=hidden_ch, out_ch=hidden_ch, num_objects=num_objects))
if outermost_linear:
self.layers.append(LookupLinear(in_ch=hidden_ch, out_ch=out_ch, num_objects=num_objects))
else:
self.layers.append(LookupLayer(in_ch=hidden_ch, out_ch=out_ch, num_objects=num_objects))
def forward(self, obj_idx):
net = []
for i in range(len(self.layers)):
net.append(self.layers[i](obj_idx))
return nn.Sequential(*net)
class LookupLinear(nn.Module):
def __init__(self,
in_ch,
out_ch,
num_objects):
super().__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.hypo_params = nn.Embedding(num_objects, in_ch * out_ch + out_ch)
for i in range(num_objects):
nn.init.kaiming_normal_(self.hypo_params.weight.data[i, :self.in_ch * self.out_ch].view(self.out_ch, self.in_ch),
a=0.0,
nonlinearity='relu',
mode='fan_in')
self.hypo_params.weight.data[i, self.in_ch * self.out_ch:].fill_(0.)
def forward(self, obj_idx):
hypo_params = self.hypo_params(obj_idx)
        # Indices are explicit to catch errors in the shape of the output layer
weights = hypo_params[..., :self.in_ch * self.out_ch]
biases = hypo_params[..., self.in_ch * self.out_ch:(self.in_ch * self.out_ch)+self.out_ch]
biases = biases.view(*(biases.size()[:-1]), 1, self.out_ch)
weights = weights.view(*(weights.size()[:-1]), self.out_ch, self.in_ch)
return BatchLinear(weights=weights, biases=biases)
class HyperLayer(nn.Module):
'''A hypernetwork that predicts a single Dense Layer, including LayerNorm and a ReLU.'''
def __init__(self,
in_ch,
out_ch,
hyper_in_ch,
hyper_num_hidden_layers,
hyper_hidden_ch):
super().__init__()
self.hyper_linear = HyperLinear(in_ch=in_ch,
out_ch=out_ch,
hyper_in_ch=hyper_in_ch,
hyper_num_hidden_layers=hyper_num_hidden_layers,
hyper_hidden_ch=hyper_hidden_ch)
self.norm_nl = nn.Sequential(
nn.LayerNorm([out_ch], elementwise_affine=False),
nn.ReLU(inplace=True)
)
def forward(self, hyper_input):
'''
:param hyper_input: input to hypernetwork.
:return: nn.Module; predicted fully connected network.
'''
return nn.Sequential(self.hyper_linear(hyper_input), self.norm_nl)
class HyperFC(nn.Module):
'''Builds a hypernetwork that predicts a fully connected neural network.
'''
def __init__(self,
hyper_in_ch,
hyper_num_hidden_layers,
hyper_hidden_ch,
hidden_ch,
num_hidden_layers,
in_ch,
out_ch,
outermost_linear=False):
super().__init__()
PreconfHyperLinear = partialclass(HyperLinear,
hyper_in_ch=hyper_in_ch,
hyper_num_hidden_layers=hyper_num_hidden_layers,
hyper_hidden_ch=hyper_hidden_ch)
PreconfHyperLayer = partialclass(HyperLayer,
hyper_in_ch=hyper_in_ch,
hyper_num_hidden_layers=hyper_num_hidden_layers,
hyper_hidden_ch=hyper_hidden_ch)
self.layers = nn.ModuleList()
self.layers.append(PreconfHyperLayer(in_ch=in_ch, out_ch=hidden_ch))
for i in range(num_hidden_layers):
self.layers.append(PreconfHyperLayer(in_ch=hidden_ch, out_ch=hidden_ch))
if outermost_linear:
self.layers.append(PreconfHyperLinear(in_ch=hidden_ch, out_ch=out_ch))
else:
self.layers.append(PreconfHyperLayer(in_ch=hidden_ch, out_ch=out_ch))
def forward(self, hyper_input):
'''
:param hyper_input: Input to hypernetwork.
:return: nn.Module; Predicted fully connected neural network.
'''
net = []
for i in range(len(self.layers)):
net.append(self.layers[i](hyper_input))
return nn.Sequential(*net)
class BatchLinear(nn.Module):
def __init__(self,
weights,
biases):
'''Implements a batch linear layer.
:param weights: Shape: (batch, out_ch, in_ch)
:param biases: Shape: (batch, 1, out_ch)
'''
super().__init__()
self.weights = weights
self.biases = biases
def __repr__(self):
return "BatchLinear(batch=%d, in_ch=%d, out_ch=%d)"%(
self.weights.shape[0], self.weights.shape[-1], self.weights.shape[-2])
def forward(self, input):
output = input.matmul(self.weights.permute(*[i for i in range(len(self.weights.shape)-2)], -1, -2))
output += self.biases
return output
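    # Shape sketch (for illustration): with weights (B, out_ch, in_ch) and
    # biases (B, 1, out_ch) as documented above, an input of shape (B, N, in_ch)
    # is mapped to (B, N, out_ch); extra leading dimensions follow the usual
    # torch.matmul broadcasting rules.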
def last_hyper_layer_init(m):
if type(m) == nn.Linear:
nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity='relu', mode='fan_in')
m.weight.data *= 1e-1
class HyperLinear(nn.Module):
'''A hypernetwork that predicts a single linear layer (weights & biases).'''
def __init__(self,
in_ch,
out_ch,
hyper_in_ch,
hyper_num_hidden_layers,
hyper_hidden_ch):
super().__init__()
self.in_ch = in_ch
self.out_ch = out_ch
self.hypo_params = FCBlock(
in_features=hyper_in_ch,
hidden_ch=hyper_hidden_ch,
num_hidden_layers=hyper_num_hidden_layers,
out_features=(in_ch * out_ch) + out_ch,
outermost_linear=True)
self.hypo_params[-1].apply(last_hyper_layer_init)
def forward(self, hyper_input):
hypo_params = self.hypo_params(hyper_input.cuda())
        # Indices are explicit to catch errors in the shape of the output layer
weights = hypo_params[..., :self.in_ch * self.out_ch]
biases = hypo_params[..., self.in_ch * self.out_ch:(self.in_ch * self.out_ch)+self.out_ch]
biases = biases.view(*(biases.size()[:-1]), 1, self.out_ch)
weights = weights.view(*(weights.size()[:-1]), self.out_ch, self.in_ch)
return BatchLinear(weights=weights, biases=biases)
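# A usage sketch with hypothetical dimensions (not part of the original file);
# note that HyperLinear moves its conditioning input to CUDA, so this assumes a GPU:
#   hyper = HyperFC(hyper_in_ch=128, hyper_num_hidden_layers=1,
#                   hyper_hidden_ch=256, hidden_ch=256, num_hidden_layers=3,
#                   in_ch=3, out_ch=4, outermost_linear=True)
#   mlp = hyper(code)                # code: (1, 128) -> nn.Sequential of predicted layers
#   y = mlp(points.unsqueeze(0))     # points: (N, 3)  -> y: (1, N, 4)
# This mirrors how HyperImplicitField in fairnr/modules/implicit.py uses it.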
| 8,327 | 32.991837 | 125 | py |
NSVF | NSVF-main/fairnr/modules/module_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules import LayerNorm
from fairseq.utils import get_activation_fn
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
return m
class PosEmbLinear(nn.Module):
def __init__(self, in_dim, out_dim, no_linear=False, scale=1, *args, **kwargs):
super().__init__()
assert out_dim % (2 * in_dim) == 0, "dimension must be dividable"
half_dim = out_dim // 2 // in_dim
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
self.emb = nn.Parameter(emb, requires_grad=False)
self.linear = Linear(out_dim, out_dim) if not no_linear else None
self.scale = scale
self.in_dim = in_dim
self.out_dim = out_dim
self.cat_input = False
def forward(self, x):
assert x.size(-1) == self.in_dim, "size must match"
sizes = x.size()
x = self.scale * x.unsqueeze(-1) @ self.emb.unsqueeze(0)
x = torch.cat([torch.sin(x), torch.cos(x)], dim=-1)
x = x.view(*sizes[:-1], self.out_dim)
if self.linear is not None:
return self.linear(x)
return x
class NeRFPosEmbLinear(nn.Module):
def __init__(self, in_dim, out_dim, angular=False, no_linear=False, cat_input=False):
super().__init__()
assert out_dim % (2 * in_dim) == 0, "dimension must be dividable"
L = out_dim // 2 // in_dim
emb = torch.exp(torch.arange(L, dtype=torch.float) * math.log(2.))
if not angular:
emb = emb * math.pi
self.emb = nn.Parameter(emb, requires_grad=False)
self.angular = angular
self.linear = Linear(out_dim, out_dim) if not no_linear else None
self.in_dim = in_dim
self.out_dim = out_dim
self.cat_input = cat_input
def forward(self, x):
assert x.size(-1) == self.in_dim, "size must match"
sizes = x.size()
inputs = x.clone()
if self.angular:
x = torch.acos(x.clamp(-1 + 1e-6, 1 - 1e-6))
x = x.unsqueeze(-1) @ self.emb.unsqueeze(0)
x = torch.cat([torch.sin(x), torch.cos(x)], dim=-1)
x = x.view(*sizes[:-1], self.out_dim)
if self.linear is not None:
x = self.linear(x)
if self.cat_input:
x = torch.cat([x, inputs], -1)
return x
def extra_repr(self) -> str:
outstr = 'Sinusoidal (in={}, out={}, angular={})'.format(
self.in_dim, self.out_dim, self.angular)
if self.cat_input:
outstr = 'Cat({}, {})'.format(outstr, self.in_dim)
return outstr
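# In other words, with L = out_dim // (2 * in_dim) frequency bands the module
# above computes the NeRF-style encoding per input coordinate p:
#   gamma(p) = (sin(2^0*pi*p), cos(2^0*pi*p), ..., sin(2^(L-1)*pi*p), cos(2^(L-1)*pi*p))
# When angular=True the pi factor is dropped and p is first mapped through
# acos(p); cat_input additionally appends the raw input to the output.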
class FCLayer(nn.Module):
"""
Reference:
https://github.com/vsitzmann/pytorch_prototyping/blob/10f49b1e7df38a58fd78451eac91d7ac1a21df64/pytorch_prototyping.py
"""
def __init__(self, in_dim, out_dim, with_ln=True):
super().__init__()
self.net = [nn.Linear(in_dim, out_dim)]
if with_ln:
self.net += [nn.LayerNorm([out_dim])]
self.net += [nn.ReLU()]
self.net = nn.Sequential(*self.net)
def forward(self, x):
return self.net(x)
class FCBlock(nn.Module):
def __init__(self,
hidden_ch,
num_hidden_layers,
in_features,
out_features,
outermost_linear=False,
with_ln=True):
super().__init__()
self.net = []
self.net.append(FCLayer(in_features, hidden_ch, with_ln))
for i in range(num_hidden_layers):
self.net.append(FCLayer(hidden_ch, hidden_ch, with_ln))
if outermost_linear:
self.net.append(Linear(hidden_ch, out_features))
else:
self.net.append(FCLayer(hidden_ch, out_features, with_ln))
self.net = nn.Sequential(*self.net)
self.net.apply(self.init_weights)
def __getitem__(self, item):
return self.net[item]
def init_weights(self, m):
if type(m) == nn.Linear:
nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity='relu', mode='fan_in')
def forward(self, input):
return self.net(input)
class InvertableMapping(nn.Module):
def __init__(self, style='simple'):
super().__init__()
self.style = style
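    # The 'simple' style below is the bijection f(x) = x / (1 - x) mapping (0, 1)
    # onto (0, +inf), with inverse g(y) = y / (1 + y) and derivative
    # dy(x) = 1 / (1 - x)^2; the 1e-7 terms only guard against division by zero.
    # For example, f(0.5) = 1.0, g(1.0) = 0.5 and dy(0.5) = 4.0.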
def f(self, x): # (0, 1) --> (0, +inf)
if self.style == 'simple':
return x / (1 - x + 1e-7)
raise NotImplementedError
def g(self, y): # (0, +inf) --> (0, 1)
if self.style == 'simple':
return y / (1 + y)
raise NotImplementedError
def dy(self, x):
if self.style == 'simple':
return 1 / ((1 - x) ** 2 + 1e-7)
raise NotImplementedError | 5,337 | 31.54878 | 125 | py |
NSVF | NSVF-main/fairnr/modules/implicit.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.utils import get_activation_fn
from fairnr.modules.hyper import HyperFC
from fairnr.modules.module_utils import FCLayer
class BackgroundField(nn.Module):
"""
Background (we assume a uniform color)
"""
def __init__(self, out_dim=3, bg_color="1.0,1.0,1.0", min_color=-1, stop_grad=False, background_depth=5.0):
super().__init__()
if out_dim == 3: # directly model RGB
bg_color = [float(b) for b in bg_color.split(',')] if isinstance(bg_color, str) else [bg_color]
if min_color == -1:
bg_color = [b * 2 - 1 for b in bg_color]
if len(bg_color) == 1:
bg_color = bg_color + bg_color + bg_color
bg_color = torch.tensor(bg_color)
else:
bg_color = torch.ones(out_dim).uniform_()
if min_color == -1:
bg_color = bg_color * 2 - 1
self.out_dim = out_dim
self.bg_color = nn.Parameter(bg_color, requires_grad=not stop_grad)
self.depth = background_depth
def forward(self, x, **kwargs):
return self.bg_color.unsqueeze(0).expand(
*x.size()[:-1], self.out_dim)
class ImplicitField(nn.Module):
def __init__(self, in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=False, with_ln=True, skips=None, spec_init=True):
super().__init__()
self.skips = skips
self.net = []
prev_dim = in_dim
for i in range(num_layers):
next_dim = out_dim if i == (num_layers - 1) else hidden_dim
if (i == (num_layers - 1)) and outmost_linear:
self.net.append(nn.Linear(prev_dim, next_dim))
else:
self.net.append(FCLayer(prev_dim, next_dim, with_ln=with_ln))
prev_dim = next_dim
if (self.skips is not None) and (i in self.skips) and (i != (num_layers - 1)):
prev_dim += in_dim
if num_layers > 0:
self.net = nn.ModuleList(self.net)
if spec_init:
self.net.apply(self.init_weights)
def forward(self, x):
y = self.net[0](x)
for i in range(len(self.net) - 1):
if (self.skips is not None) and (i in self.skips):
y = torch.cat((x, y), dim=-1)
y = self.net[i+1](y)
return y
def init_weights(self, m):
if type(m) == nn.Linear:
nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity='relu', mode='fan_in')
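# A minimal usage sketch of ImplicitField (sizes are assumptions for
# illustration): a 4-layer MLP where, via `skips=[2]`, the raw input is
# concatenated back in partway through the network:
#   field = ImplicitField(in_dim=39, out_dim=256, hidden_dim=256,
#                         num_layers=4, skips=[2])
#   feats = field(torch.randn(1024, 39))   # -> (1024, 256)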
class HyperImplicitField(nn.Module):
def __init__(self, hyper_in_dim, in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=False):
super().__init__()
self.hyper_in_dim = hyper_in_dim
self.in_dim = in_dim
self.net = HyperFC(
hyper_in_dim,
1, 256,
hidden_dim,
num_layers,
in_dim,
out_dim,
outermost_linear=outmost_linear
)
def forward(self, x, c):
assert (x.size(-1) == self.in_dim) and (c.size(-1) == self.hyper_in_dim)
        if getattr(self, "nerfpos", None) is not None:  # optional positional embedding; not created in __init__
x = torch.cat([x, self.nerfpos(x)], -1)
return self.net(c)(x.unsqueeze(0)).squeeze(0)
class SignedDistanceField(ImplicitField):
"""
Predictor for density or SDF values.
"""
def __init__(self, in_dim, hidden_dim, num_layers=1,
recurrent=False, with_ln=True, spec_init=True):
super().__init__(in_dim, in_dim, in_dim, num_layers-1, with_ln=with_ln, spec_init=spec_init)
self.recurrent = recurrent
if recurrent:
assert num_layers > 1
self.hidden_layer = nn.LSTMCell(input_size=in_dim, hidden_size=hidden_dim)
self.hidden_layer.apply(init_recurrent_weights)
lstm_forget_gate_init(self.hidden_layer)
else:
self.hidden_layer = FCLayer(in_dim, hidden_dim, with_ln) \
if num_layers > 0 else nn.Identity()
prev_dim = hidden_dim if num_layers > 0 else in_dim
self.output_layer = nn.Linear(prev_dim, 1)
def forward(self, x, state=None):
if self.recurrent:
shape = x.size()
state = self.hidden_layer(x.view(-1, shape[-1]), state)
if state[0].requires_grad:
state[0].register_hook(lambda x: x.clamp(min=-5, max=5))
return self.output_layer(state[0].view(*shape[:-1], -1)).squeeze(-1), state
else:
return self.output_layer(self.hidden_layer(x)).squeeze(-1), None
class TextureField(ImplicitField):
"""
    Pixel (color) generator: a per-point MLP, equivalent to 1x1 conv networks
"""
def __init__(self, in_dim, hidden_dim, num_layers,
with_alpha=False, with_ln=True, spec_init=True):
out_dim = 3 if not with_alpha else 4
super().__init__(in_dim, out_dim, hidden_dim, num_layers,
outmost_linear=True, with_ln=with_ln, spec_init=spec_init)
# ------------------ #
# helper functions #
# ------------------ #
def init_recurrent_weights(module):
    # standalone helper applied via nn.Module.apply() in SignedDistanceField
    for m in module.modules():
if type(m) in [nn.GRU, nn.LSTM, nn.RNN]:
for name, param in m.named_parameters():
if 'weight_ih' in name:
nn.init.kaiming_normal_(param.data)
elif 'weight_hh' in name:
nn.init.orthogonal_(param.data)
elif 'bias' in name:
param.data.fill_(0)
def lstm_forget_gate_init(lstm_layer):
for name, parameter in lstm_layer.named_parameters():
        if "bias" not in name: continue
n = parameter.size(0)
start, end = n // 4, n // 2
parameter.data[start:end].fill_(1.)
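# Usage sketch for the gradient hook below (assumed; it is not referenced
# elsewhere in this file): register it on a tensor, e.g.
#   x.register_hook(clip_grad_norm_hook)
# Returning a tensor replaces the gradient with the rescaled one, while the
# implicit None return (when clip_coef >= 1) leaves the gradient unchanged.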
def clip_grad_norm_hook(x, max_norm=10):
total_norm = x.norm()
total_norm = total_norm ** (1 / 2.)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
return x * clip_coef | 6,163 | 34.837209 | 111 | py |
NSVF | NSVF-main/fairnr/criterions/rendering_loss.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn.functional as F
import torch
from torch import Tensor
from fairseq import metrics
from fairseq.utils import item
from fairseq.criterions import FairseqCriterion, register_criterion
import fairnr.criterions.utils as utils
class RenderingCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(task)
self.args = args
self.hierarchical = getattr(args, 'hierarchical_loss', False)
@classmethod
def build_criterion(cls, args, task):
"""Construct a criterion from command-line args."""
return cls(args, task)
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument('--hierarchical-loss', action='store_true',
help='if set, it computes both the coarse and fine-level losses in hierarchical sampling.')
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample)
sample.update(net_output['samples'])
loss, loss_output = self.compute_loss(model, net_output, sample, reduce=reduce)
if self.hierarchical:
assert net_output.get('coarse', None) is not None, "missing coarse level outputs."
loss0, loss_output0 = self.compute_loss(model, net_output['coarse'], sample, reduce=reduce)
loss = loss + loss0
loss_output.update({'cor-' + key: loss_output0[key] for key in loss_output0})
sample_size = 1
logging_output = {
'loss': loss.data.item() if reduce else loss.data,
'nsentences': sample['alpha'].size(0),
'ntokens': sample['alpha'].size(1),
'npixels': sample['alpha'].size(2),
'sample_size': sample_size,
}
for w in loss_output:
logging_output[w] = loss_output[w]
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
raise NotImplementedError
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
summed_logging_outputs = {
w: sum(log.get(w, 0) for log in logging_outputs)
for w in logging_outputs[0]
}
sample_size = summed_logging_outputs['sample_size']
for w in summed_logging_outputs:
if '_loss' in w:
metrics.log_scalar(w.split('_')[0], summed_logging_outputs[w] / sample_size, sample_size, round=3)
elif '_weight' in w:
metrics.log_scalar('w_' + w[:3], summed_logging_outputs[w] / sample_size, sample_size, round=3)
elif '_acc' in w:
metrics.log_scalar('a_' + w[:3], summed_logging_outputs[w] / sample_size, sample_size, round=3)
elif w == 'loss':
metrics.log_scalar('loss', summed_logging_outputs['loss'] / sample_size, sample_size, priority=0, round=3)
elif '_log' in w:
metrics.log_scalar(w[:3], summed_logging_outputs[w] / sample_size, sample_size, priority=1, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
@register_criterion('srn_loss')
class SRNLossCriterion(RenderingCriterion):
def __init__(self, args, task):
super().__init__(args, task)
# HACK: to avoid warnings in c10d
self.dummy_loss = torch.nn.Parameter(torch.tensor(0.0, dtype=torch.float32), requires_grad=True)
if args.vgg_weight > 0:
from fairnr.criterions.perceptual_loss import VGGPerceptualLoss
self.vgg = VGGPerceptualLoss(resize=False)
if args.eval_lpips:
from lpips_pytorch import LPIPS
self.lpips = LPIPS(net_type='alex', version='0.1')
@staticmethod
def add_args(parser):
RenderingCriterion.add_args(parser)
parser.add_argument('--L1', action='store_true',
help='if enabled, use L1 instead of L2 for RGB loss')
parser.add_argument('--color-weight', type=float, default=256.0)
parser.add_argument('--depth-weight', type=float, default=0.0)
parser.add_argument('--depth-weight-decay', type=str, default=None,
help="""if set, use tuple to set (final_ratio, steps).
For instance, (0, 30000)
""")
parser.add_argument('--alpha-weight', type=float, default=0.0)
parser.add_argument('--vgg-weight', type=float, default=0.0)
parser.add_argument('--eikonal-weight', type=float, default=0.0)
parser.add_argument('--regz-weight', type=float, default=0.0)
parser.add_argument('--vgg-level', type=int, choices=[1,2,3,4], default=2)
parser.add_argument('--eval-lpips', action='store_true',
help="evaluate LPIPS scores in validation")
parser.add_argument('--no-background-loss', action='store_true')
def compute_loss(self, model, net_output, sample, reduce=True):
losses, other_logs = {}, {}
# prepare data before computing loss
sampled_uv = sample['sampled_uv'] # S, V, 2, N, P, P (patch-size)
S, V, _, N, P1, P2 = sampled_uv.size()
H, W, h, w = sample['size'][0, 0].long().cpu().tolist()
L = N * P1 * P2
flatten_uv = sampled_uv.view(S, V, 2, L)
flatten_index = (flatten_uv[:,:,0] // h + flatten_uv[:,:,1] // w * W).long()
assert 'colors' in sample and sample['colors'] is not None, "ground-truth colors not provided"
target_colors = sample['colors']
masks = (sample['alpha'] > 0) if self.args.no_background_loss else None
if L < target_colors.size(2):
target_colors = target_colors.gather(2, flatten_index.unsqueeze(-1).repeat(1,1,1,3))
masks = masks.gather(2, flatten_uv) if masks is not None else None
if 'other_logs' in net_output:
other_logs.update(net_output['other_logs'])
# computing loss
if self.args.color_weight > 0:
color_loss = utils.rgb_loss(
net_output['colors'], target_colors,
masks, self.args.L1)
losses['color_loss'] = (color_loss, self.args.color_weight)
if self.args.alpha_weight > 0:
_alpha = net_output['missed'].reshape(-1)
alpha_loss = torch.log1p(
1. / 0.11 * _alpha.float() * (1 - _alpha.float())
).mean().type_as(_alpha)
losses['alpha_loss'] = (alpha_loss, self.args.alpha_weight)
if self.args.depth_weight > 0:
if sample['depths'] is not None:
                target_depths = sample['depths'].gather(2, flatten_index)
depth_mask = masks & (target_depths > 0)
depth_loss = utils.depth_loss(net_output['depths'], target_depths, depth_mask)
else:
# no depth map is provided, depth loss only applied on background based on masks
max_depth_target = self.args.max_depth * torch.ones_like(net_output['depths'])
if sample['mask'] is not None:
depth_loss = utils.depth_loss(net_output['depths'], max_depth_target, (1 - sample['mask']).bool())
else:
depth_loss = utils.depth_loss(net_output['depths'], max_depth_target, ~masks)
depth_weight = self.args.depth_weight
if self.args.depth_weight_decay is not None:
final_factor, final_steps = eval(self.args.depth_weight_decay)
depth_weight *= max(0, 1 - (1 - final_factor) * self.task._num_updates / final_steps)
other_logs['depth_weight'] = depth_weight
losses['depth_loss'] = (depth_loss, depth_weight)
if self.args.vgg_weight > 0:
assert P1 * P2 > 1, "we have to use a patch-based sampling for VGG loss"
target_colors = target_colors.reshape(-1, P1, P2, 3).permute(0, 3, 1, 2) * .5 + .5
output_colors = net_output['colors'].reshape(-1, P1, P2, 3).permute(0, 3, 1, 2) * .5 + .5
vgg_loss = self.vgg(output_colors, target_colors)
losses['vgg_loss'] = (vgg_loss, self.args.vgg_weight)
if self.args.eikonal_weight > 0:
losses['eik_loss'] = (net_output['eikonal-term'].mean(), self.args.eikonal_weight)
# if self.args.regz_weight > 0:
losses['reg_loss'] = (net_output['regz-term'].mean(), self.args.regz_weight)
loss = sum(losses[key][0] * losses[key][1] for key in losses)
# add a dummy loss
loss = loss + model.dummy_loss + self.dummy_loss * 0.
logging_outputs = {key: item(losses[key][0]) for key in losses}
logging_outputs.update(other_logs)
return loss, logging_outputs
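    # Worked example for --depth-weight-decay (numbers are illustrative): with
    # '(0, 30000)' the depth weight above is scaled by
    # max(0, 1 - (1 - 0) * num_updates / 30000), i.e. it decays linearly from the
    # full --depth-weight at update 0 down to zero at update 30000 and beyond.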
| 9,677 | 43.805556 | 122 | py |
NSVF | NSVF-main/fairnr/criterions/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
TINY = 1e-7
def rgb_loss(predicts, rgbs, masks=None, L1=False, sum=False):
if masks is not None:
if masks.sum() == 0:
return predicts.new_zeros(1).mean()
predicts = predicts[masks]
rgbs = rgbs[masks]
if L1:
loss = torch.abs(predicts - rgbs).sum(-1)
else:
loss = ((predicts - rgbs) ** 2).sum(-1)
return loss.mean() if not sum else loss.sum()
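# Usage sketch for these helpers (shapes are assumptions for illustration):
# rgb_loss above and depth_loss below take predictions and targets of matching
# shape plus an optional boolean mask, e.g.
#   loss_rgb = rgb_loss(pred_rgb, gt_rgb, masks=hit_mask)       # (..., 3) inputs
#   loss_dep = depth_loss(pred_dep, gt_dep, masks=hit_mask & (gt_dep > 0))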
def depth_loss(depths, depth_gt, masks=None, sum=False):
if masks is not None:
if masks.sum() == 0:
return depths.new_zeros(1).mean()
depth_gt = depth_gt[masks]
depths = depths[masks]
    loss = (depths - depth_gt) ** 2
return loss.mean() if not sum else loss.sum() | 971 | 26 | 65 | py |
NSVF | NSVF-main/fairnr/criterions/perceptual_loss.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torchvision
class VGGPerceptualLoss(torch.nn.Module):
def __init__(self, resize=False):
super(VGGPerceptualLoss, self).__init__()
blocks = []
blocks.append(torchvision.models.vgg16(pretrained=True).features[:4].eval())
blocks.append(torchvision.models.vgg16(pretrained=True).features[4:9].eval())
blocks.append(torchvision.models.vgg16(pretrained=True).features[9:16].eval())
blocks.append(torchvision.models.vgg16(pretrained=True).features[16:23].eval())
self.blocks = torch.nn.ModuleList(blocks)
self.transform = torch.nn.functional.interpolate
self.mean = torch.nn.Parameter(torch.tensor([0.485, 0.456, 0.406]).view(1,3,1,1))
self.std = torch.nn.Parameter(torch.tensor([0.229, 0.224, 0.225]).view(1,3,1,1))
self.resize = resize
# NO GRADIENT!
for param in self.parameters():
param.requires_grad = False
def forward(self, input, target, level=2):
# print(input.device, input.dtype, self.mean.device, self.mean.dtype, self.std, self.std.dtype)
if input.shape[1] != 3:
input = input.repeat(1, 3, 1, 1)
target = target.repeat(1, 3, 1, 1)
input = (input-self.mean) / self.std
target = (target-self.mean) / self.std
if self.resize:
input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
loss = 0.0
x = input
y = target
for i, block in enumerate(self.blocks):
x = block(x)
y = block(y)
if i < level:
loss += torch.nn.functional.mse_loss(x, y)
else:
break
return loss
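    # Usage sketch (assumed): inputs are NCHW image batches scaled to [0, 1]
    # (they are ImageNet-normalized internally), and `level` selects how many
    # VGG16 blocks contribute to the loss:
    #   vgg = VGGPerceptualLoss(resize=False)
    #   loss = vgg(pred_patches, target_patches, level=2)
    # This mirrors how SRNLossCriterion rescales patches to [0, 1] before the call.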
| 2,023 | 39.48 | 103 | py |
NSVF | NSVF-main/fairnr/models/nsvf_bg.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
import cv2, math, time, copy, json
import numpy as np
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
register_model,
register_model_architecture
)
from fairseq.utils import item, with_torch_seed
from fairnr.data.geometry import compute_normal_map, fill_in
from fairnr.models.nsvf import NSVFModel, base_architecture, nerf_style_architecture
from fairnr.models.fairnr_model import get_encoder, get_field, get_reader, get_renderer
@register_model('nsvf_bg')
class NSVFBGModel(NSVFModel):
def __init__(self, args, setups):
super().__init__(args, setups)
args_copy = copy.deepcopy(args)
if getattr(args, "bg_field_args", None) is not None:
args_copy.__dict__.update(json.loads(args.bg_field_args))
else:
args_copy.inputs_to_density = "pos:10"
args_copy.inputs_to_texture = "feat:0:256, ray:4:3:b"
self.bg_field = get_field("radiance_field")(args_copy)
self.bg_encoder = get_encoder("volume_encoder")(args_copy)
@classmethod
def add_args(cls, parser):
super().add_args(parser)
parser.add_argument('--near', type=float, help='near distance of the volume')
parser.add_argument('--far', type=float, help='far distance of the volume')
parser.add_argument('--nerf-steps', type=int, help='additional nerf steps')
parser.add_argument('--bg-field-args', type=str, default=None, help='override args for bg field')
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
# we will trace the background field here
S, V, P = sizes
fullsize = S * V * P
vox_colors = fill_in((fullsize, 3), hits, all_results['colors'], 0.0)
vox_missed = fill_in((fullsize, ), hits, all_results['missed'], 1.0)
vox_depths = fill_in((fullsize, ), hits, all_results['depths'], 0.0)
mid_dis = (self.args.near + self.args.far) / 2
n_depth = fill_in((fullsize, ), hits, all_results['min_depths'], mid_dis)[:, None]
f_depth = fill_in((fullsize, ), hits, all_results['max_depths'], mid_dis)[:, None]
# front field
nerf_step = getattr(self.args, "nerf_steps", 64)
max_depth = n_depth
min_depth = torch.ones_like(max_depth) * self.args.near
intersection_outputs = {
"min_depth": min_depth, "max_depth": max_depth,
"probs": torch.ones_like(max_depth),
"steps": torch.ones_like(max_depth).squeeze(-1) * nerf_step,
"intersected_voxel_idx": torch.zeros_like(min_depth).int()}
with with_torch_seed(self.unique_seed):
fg_samples = self.bg_encoder.ray_sample(intersection_outputs)
fg_results = self.raymarcher(
self.bg_encoder, self.bg_field, ray_start, ray_dir, fg_samples, {})
# back field
min_depth = f_depth
max_depth = torch.ones_like(min_depth) * self.args.far
intersection_outputs = {
"min_depth": min_depth, "max_depth": max_depth,
"probs": torch.ones_like(max_depth),
"steps": torch.ones_like(max_depth).squeeze(-1) * nerf_step,
"intersected_voxel_idx": torch.zeros_like(min_depth).int()}
with with_torch_seed(self.unique_seed):
bg_samples = self.bg_encoder.ray_sample(intersection_outputs)
bg_results = self.raymarcher(
self.bg_encoder, self.bg_field, ray_start, ray_dir, bg_samples, {})
# merge background to foreground
all_results['voxcolors'] = vox_colors.view(S, V, P, 3)
all_results['colors'] = fg_results['colors'] + fg_results['missed'][:, None] * (vox_colors + vox_missed[:, None] * bg_results['colors'])
all_results['depths'] = fg_results['depths'] + fg_results['missed'] * (vox_depths + vox_missed * bg_results['depths'])
all_results['missed'] = fg_results['missed'] * vox_missed * bg_results['missed']
# apply the NSVF post-processing
return super().postprocessing(ray_start, ray_dir, all_results, hits, sizes)
def _visualize(self, images, sample, output, state, **kwargs):
img_id, shape, view, width, name = state
images = super()._visualize(images, sample, output, state, **kwargs)
if 'voxcolors' in output and output['voxcolors'] is not None:
images['{}_vcolors/{}:HWC'.format(name, img_id)] ={
'img': output['voxcolors'][shape, view],
'min_val': float(self.args.min_color)
}
return images
@register_model_architecture("nsvf_bg", "nsvf_bg")
def base_bg_architecture(args):
base_architecture(args)
@register_model_architecture("nsvf_bg", "nsvf_bg_xyz")
def base_bg2_architecture(args):
args.nerf_steps = getattr(args, "nerf_steps", 64)
nerf_style_architecture(args)
@register_model('shared_nsvf_bg')
class SharedNSVFBGModel(NSVFBGModel):
ENCODER = 'shared_sparsevoxel_encoder'
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
# we will trace the background field here
# pass context vector from NSVF to NeRF
self.bg_encoder.precompute(context=self.encoder.contexts(self.encoder.cid).unsqueeze(0))
return super().postprocessing(ray_start, ray_dir, all_results, hits, sizes)
@torch.no_grad()
def split_voxels(self):
logger.info("half the global voxel size {:.4f} -> {:.4f}".format(
self.encoder.all_voxels[0].voxel_size.item(),
self.encoder.all_voxels[0].voxel_size.item() * .5))
self.encoder.splitting()
for id in range(len(self.encoder.all_voxels)):
self.encoder.all_voxels[id].voxel_size *= .5
self.encoder.all_voxels[id].max_hits *= 1.5
self.clean_caches()
@torch.no_grad()
def reduce_stepsize(self):
logger.info("reduce the raymarching step size {:.4f} -> {:.4f}".format(
self.encoder.all_voxels[0].step_size.item(),
self.encoder.all_voxels[0].step_size.item() * .5))
for id in range(len(self.encoder.all_voxels)):
self.encoder.all_voxels[id].step_size *= .5
@register_model_architecture("shared_nsvf_bg", "shared_nsvf_bg_xyz")
def base_shared_architecture(args):
args.context_embed_dim = getattr(args, "context_embed_dim", 96)
args.hypernetwork = getattr(args, "hypernetwork", False)
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10, context:0:96")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4:3:b")
    args.bg_field_args = getattr(args, "bg_field_args",
        '{"inputs_to_density": "pos:10, context:0:96", "inputs_to_texture": "feat:0:256, ray:4:3:b"}')
nerf_style_architecture(args) | 7,079 | 43.810127 | 144 | py |
NSVF | NSVF-main/fairnr/models/multi_nsvf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
import torch
from fairseq.models import (
register_model,
register_model_architecture
)
from fairnr.models.nsvf import NSVFModel, base_architecture
@register_model('multi_nsvf')
class MultiNSVFModel(NSVFModel):
ENCODER = 'multi_sparsevoxel_encoder'
@torch.no_grad()
def split_voxels(self):
logger.info("half the global voxel size {:.4f} -> {:.4f}".format(
self.encoder.all_voxels[0].voxel_size.item(),
self.encoder.all_voxels[0].voxel_size.item() * .5))
self.encoder.splitting()
for id in range(len(self.encoder.all_voxels)):
self.encoder.all_voxels[id].voxel_size *= .5
self.encoder.all_voxels[id].max_hits *= 1.5
@torch.no_grad()
def reduce_stepsize(self):
logger.info("reduce the raymarching step size {:.4f} -> {:.4f}".format(
self.encoder.all_voxels[0].step_size.item(),
self.encoder.all_voxels[0].step_size.item() * .5))
for id in range(len(self.encoder.all_voxels)):
self.encoder.all_voxels[id].step_size *= .5
@register_model("shared_nsvf")
class SharedNSVFModel(MultiNSVFModel):
ENCODER = 'shared_sparsevoxel_encoder'
@register_model_architecture('multi_nsvf', "multi_nsvf_base")
def multi_base_architecture(args):
base_architecture(args)
@register_model_architecture('shared_nsvf', 'shared_nsvf')
def shared_base_architecture(args):
# encoder
args.context_embed_dim = getattr(args, "context_embed_dim", 96)
# field
args.inputs_to_density = getattr(args, "inputs_to_density", "emb:6:32, context:0:96")
args.hypernetwork = getattr(args, "hypernetwork", False)
base_architecture(args) | 1,938 | 30.786885 | 89 | py |
NSVF | NSVF-main/fairnr/models/nerf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
import cv2, math, time
import numpy as np
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
register_model,
register_model_architecture
)
from fairseq.utils import with_torch_seed
from fairnr.models.fairnr_model import BaseModel
@register_model('nerf')
class NeRFModel(BaseModel):
""" This is a simple re-implementation of the vanilla NeRF
"""
ENCODER = 'volume_encoder'
READER = 'image_reader'
FIELD = 'radiance_field'
RAYMARCHER = 'volume_rendering'
@classmethod
def add_args(cls, parser):
super().add_args(parser)
parser.add_argument('--fixed-num-samples', type=int,
help='number of samples for the first pass along the ray.')
parser.add_argument('--fixed-fine-num-samples', type=int,
help='sample a fixed number of points for each ray in hierarchical sampling, e.g. 64, 128.')
parser.add_argument('--reduce-fine-for-missed', action='store_true',
help='if set, the number of fine samples is discounted based on foreground probability only.')
def preprocessing(self, **kwargs):
return self.encoder.precompute(**kwargs)
def intersecting(self, ray_start, ray_dir, encoder_states, **kwargs):
ray_start, ray_dir, intersection_outputs, hits = \
self.encoder.ray_intersect(ray_start, ray_dir, encoder_states)
return ray_start, ray_dir, intersection_outputs, hits, None
def raymarching(self, ray_start, ray_dir, intersection_outputs, encoder_states, fine=False):
# sample points and use middle point approximation
with with_torch_seed(self.unique_seed): # make sure each GPU sample differently.
samples = self.encoder.ray_sample(intersection_outputs)
field = self.field_fine if fine and (self.field_fine is not None) else self.field
all_results = self.raymarcher(
self.encoder, field, ray_start, ray_dir, samples, encoder_states
)
return samples, all_results
def prepare_hierarchical_sampling(self, intersection_outputs, samples, all_results):
# this function is basically the same as that in NSVF model.
depth = samples.get('original_point_depth', samples['sampled_point_depth'])
dists = samples.get('original_point_distance', samples['sampled_point_distance'])
intersection_outputs['min_depth'] = depth - dists * .5
intersection_outputs['max_depth'] = depth + dists * .5
intersection_outputs['intersected_voxel_idx'] = samples['sampled_point_voxel_idx'].contiguous()
# safe_probs = all_results['probs'] + 1e-8 # HACK: make a non-zero distribution
safe_probs = all_results['probs'] + 1e-5 # NeRF used 1e-5, will this make a change?
intersection_outputs['probs'] = safe_probs / safe_probs.sum(-1, keepdim=True)
intersection_outputs['steps'] = safe_probs.new_ones(*safe_probs.size()[:-1])
if getattr(self.args, "fixed_fine_num_samples", 0) > 0:
intersection_outputs['steps'] = intersection_outputs['steps'] * self.args.fixed_fine_num_samples
if getattr(self.args, "reduce_fine_for_missed", False):
intersection_outputs['steps'] = intersection_outputs['steps'] * safe_probs.sum(-1)
return intersection_outputs
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
# vanilla nerf hits everything. so no need to fill_in
S, V, P = sizes
fullsize = S * V * P
all_results['missed'] = all_results['missed'].view(S, V, P)
all_results['colors'] = all_results['colors'].view(S, V, P, 3)
all_results['depths'] = all_results['depths'].view(S, V, P)
if 'z' in all_results:
all_results['z'] = all_results['z'].view(S, V, P)
BG_DEPTH = self.field.bg_color.depth
bg_color = self.field.bg_color(all_results['colors'])
all_results['colors'] += all_results['missed'].unsqueeze(-1) * bg_color.reshape(fullsize, 3).view(S, V, P, 3)
all_results['depths'] += all_results['missed'] * BG_DEPTH
if 'normal' in all_results:
all_results['normal'] = all_results['normal'].view(S, V, P, 3)
return all_results
def add_other_logs(self, all_results):
return {}
@register_model_architecture("nerf", "nerf_base")
def base_architecture(args):
# parameter needs to be changed
args.near = getattr(args, "near", 2)
args.far = getattr(args, "far", 4)
args.fixed_num_samples = getattr(args, "fixed_num_samples", 64)
args.fixed_fine_num_samples = getattr(args, "fixed_fine_num_samples", 128)
args.hierarchical_sampling = getattr(args, "hierarchical_sampling", True)
args.use_fine_model = getattr(args, "use_fine_model", True)
# field
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4")
args.feature_embed_dim = getattr(args, "feature_embed_dim", 256)
args.density_embed_dim = getattr(args, "density_embed_dim", 128)
args.texture_embed_dim = getattr(args, "texture_embed_dim", 256)
# API Update: fix the number of layers
args.feature_layers = getattr(args, "feature_layers", 1)
args.texture_layers = getattr(args, "texture_layers", 3)
args.background_stop_gradient = getattr(args, "background_stop_gradient", False)
args.background_depth = getattr(args, "background_depth", 5.0)
# raymarcher
args.discrete_regularization = getattr(args, "discrete_regularization", False)
args.deterministic_step = getattr(args, "deterministic_step", False)
args.raymarching_tolerance = getattr(args, "raymarching_tolerance", 0)
# reader
args.pixel_per_view = getattr(args, "pixel_per_view", 2048)
args.sampling_on_mask = getattr(args, "sampling_on_mask", 0.0)
args.sampling_at_center = getattr(args, "sampling_at_center", 1.0)
args.sampling_on_bbox = getattr(args, "sampling_on_bbox", False)
args.sampling_patch_size = getattr(args, "sampling_patch_size", 1)
args.sampling_skipping_size = getattr(args, "sampling_skipping_size", 1)
# others
args.chunk_size = getattr(args, "chunk_size", 64)
args.valid_chunk_size = getattr(args, "valid_chunk_size", 64)
@register_model_architecture("nerf", "nerf_deep")
def nerf_deep_architecture(args):
args.feature_layers = getattr(args, "feature_layers", 6)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
base_architecture(args)
@register_model_architecture("nerf", "nerf_nerf")
def nerf_nerf_architecture(args):
args.feature_layers = getattr(args, "feature_layers", 6)
args.texture_layers = getattr(args, "texture_layers", 0)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
base_architecture(args)
@register_model_architecture("nerf", "nerf_xyzn_nope")
def nerf2_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:0:3, normal:0:3, sigma:0:1, ray:4")
base_architecture(args)
@register_model('sdf_nerf')
class SDFNeRFModel(NeRFModel):
FIELD = "sdf_radiance_field"
@register_model_architecture("sdf_nerf", "sdf_nerf")
def sdf_nsvf_architecture(args):
args.feature_layers = getattr(args, "feature_layers", 6)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
nerf2_architecture(args)
@register_model('sg_nerf')
class SGNeRFModel(NeRFModel):
""" This is a simple re-implementation of the vanilla NeRF
"""
ENCODER = 'infinite_volume_encoder'
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
# vanilla nerf hits everything. so no need to fill_in
S, V, P = sizes
all_results['missed'] = all_results['missed'].view(S, V, P)
all_results['colors'] = all_results['colors'].view(S, V, P, 3)
all_results['depths'] = all_results['depths'].view(S, V, P)
if 'z' in all_results:
all_results['z'] = all_results['z'].view(S, V, P)
if 'normal' in all_results:
all_results['normal'] = all_results['normal'].view(S, V, P, 3)
return all_results
@register_model_architecture("sg_nerf", "sg_nerf_base")
def sg_nerf_architecture(args):
INF_FAR = 1e6
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10:4")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4:3:b")
args.near = getattr(args, "near", 2)
args.far = getattr(args, "far", INF_FAR)
base_architecture(args)
@register_model_architecture("sg_nerf", "sg_nerf_new")
def sg_nerf2_architecture(args):
args.nerf_style_mlp = getattr(args, "nerf_style_mlp", True)
args.texture_embed_dim = getattr(args, "texture_embed_dim", 128)
sg_nerf_architecture(args) | 9,380 | 43.25 | 117 | py |
NSVF | NSVF-main/fairnr/models/fairnr_model.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Base classes for various models.
The basic principle of differentiable rendering consists of two components:
    -- a field, or so-called geometric field (GE)
    -- a raymarcher, or so-called differentiable ray-marcher (RM)
Together they can be composed into a GERM model.
"""
import logging
import torch
import torch.nn as nn
import skimage.metrics
import imageio, os
import numpy as np
import copy
from collections import defaultdict
from fairseq.models import BaseFairseqModel
from fairseq.utils import with_torch_seed
from fairnr.modules.encoder import get_encoder
from fairnr.modules.field import get_field
from fairnr.modules.renderer import get_renderer
from fairnr.modules.reader import get_reader
from fairnr.data.geometry import ray, compute_normal_map
from fairnr.data.data_utils import recover_image
logger = logging.getLogger(__name__)
class BaseModel(BaseFairseqModel):
"""Base class"""
ENCODER = 'abstract_encoder'
FIELD = 'abstract_field'
RAYMARCHER = 'abstract_renderer'
READER = 'abstract_reader'
def __init__(self, args, setups):
super().__init__()
self.args = args
self.hierarchical = getattr(self.args, "hierarchical_sampling", False)
self.reader = setups['reader']
self.encoder = setups['encoder']
self.field = setups['field']
self.raymarcher = setups['raymarcher']
self.cache = None
self._num_updates = 0
if getattr(self.args, "use_fine_model", False):
self.field_fine = copy.deepcopy(self.field)
else:
self.field_fine = None
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
reader = get_reader(cls.READER)(args)
encoder = get_encoder(cls.ENCODER)(args)
field = get_field(cls.FIELD)(args)
raymarcher = get_renderer(cls.RAYMARCHER)(args)
setups = {
'reader': reader,
'encoder': encoder,
'field': field,
'raymarcher': raymarcher
}
return cls(args, setups)
@classmethod
def add_args(cls, parser):
get_reader(cls.READER).add_args(parser)
get_renderer(cls.RAYMARCHER).add_args(parser)
get_encoder(cls.ENCODER).add_args(parser)
get_field(cls.FIELD).add_args(parser)
# model-level args
parser.add_argument('--hierarchical-sampling', action='store_true',
help='if set, a second ray marching pass will be performed based on the first time probs.')
parser.add_argument('--use-fine-model', action='store_true',
help='if set, we will simultaneously optimize two networks, a coarse field and a fine field.')
def set_num_updates(self, num_updates):
self._num_updates = num_updates
super().set_num_updates(num_updates)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
if (self.field_fine is None) and \
("field_fine" in [key.split('.')[0] for key in state_dict.keys()]):
            # the loaded checkpoint contains a fine-field network; copy its weights into the field network
for fine_key in [key for key in state_dict.keys() if "field_fine" in key]:
state_dict[fine_key.replace("field_fine", "field")] = state_dict[fine_key]
del state_dict[fine_key]
@property
def dummy_loss(self):
return sum([p.sum() for p in self.parameters()]) * 0.0
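    # The zero-valued dummy loss touches every parameter so that all of them stay
    # in the autograd graph even on steps where some receive no real gradient,
    # avoiding the c10d / DistributedDataParallel unused-parameter complaints the
    # criterion also works around ("HACK: to avoid warnings in c10d").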
def forward(self, ray_split=1, **kwargs):
with with_torch_seed(self.unique_seed): # make sure different GPU sample different rays
ray_start, ray_dir, uv = self.reader(**kwargs)
kwargs.update({
'field_fn': self.field.forward,
'input_fn': self.encoder.forward})
if ray_split == 1:
results = self._forward(ray_start, ray_dir, **kwargs)
else:
total_rays = ray_dir.shape[2]
chunk_size = total_rays // ray_split
results = [
self._forward(
ray_start, ray_dir[:, :, i: i+chunk_size], **kwargs)
for i in range(0, total_rays, chunk_size)
]
results = self.merge_outputs(results)
results['samples'] = {
'sampled_uv': results.get('sampled_uv', uv),
'ray_start': ray_start,
'ray_dir': ray_dir
}
# caching the prediction
self.cache = {
w: results[w].detach()
if isinstance(w, torch.Tensor)
else results[w]
for w in results
}
return results
def _forward(self, ray_start, ray_dir, **kwargs):
S, V, P, _ = ray_dir.size()
        assert S == 1, "we only support a single object for now."
encoder_states = self.preprocessing(**kwargs)
ray_start, ray_dir, intersection_outputs, hits, sampled_uv = \
self.intersecting(ray_start, ray_dir, encoder_states, **kwargs)
# save the original rays
ray_start0 = ray_start.reshape(-1, 3).clone()
ray_dir0 = ray_dir.reshape(-1, 3).clone()
P = ray_dir.size(1) // V
all_results = defaultdict(lambda: None)
if hits.sum() > 0:
intersection_outputs = {
name: outs[hits] for name, outs in intersection_outputs.items()}
ray_start, ray_dir = ray_start[hits], ray_dir[hits]
encoder_states = {name: s.reshape(-1, s.size(-1)) if s is not None else None
for name, s in encoder_states.items()}
samples, all_results = self.raymarching( # ray-marching
ray_start, ray_dir, intersection_outputs, encoder_states)
if self.hierarchical: # hierarchical sampling
intersection_outputs = self.prepare_hierarchical_sampling(
intersection_outputs, samples, all_results)
coarse_results = all_results.copy()
samples, all_results = self.raymarching(
ray_start, ray_dir, intersection_outputs, encoder_states, fine=True)
all_results['coarse'] = coarse_results
hits = hits.reshape(-1)
all_results = self.postprocessing(ray_start0, ray_dir0, all_results, hits, (S, V, P))
if self.hierarchical:
all_results['coarse'] = self.postprocessing(
ray_start, ray_dir, all_results['coarse'], hits, (S, V, P))
if sampled_uv is not None:
all_results['sampled_uv'] = sampled_uv
all_results['other_logs'] = self.add_other_logs(all_results)
return all_results
def preprocessing(self, **kwargs):
raise NotImplementedError
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
raise NotImplementedError
def intersecting(self, ray_start, ray_dir, encoder_states):
raise NotImplementedError
def raymarching(self, ray_start, ray_dir, intersection_outputs, encoder_states, fine=False):
raise NotImplementedError
def prepare_hierarchical_sampling(self, intersection_outputs, samples, all_results):
raise NotImplementedError
def add_other_logs(self, all_results):
raise NotImplementedError
def merge_outputs(self, outputs):
new_output = {}
for key in outputs[0]:
if isinstance(outputs[0][key], torch.Tensor) and outputs[0][key].dim() > 2:
new_output[key] = torch.cat([o[key] for o in outputs], 2)
else:
new_output[key] = outputs[0][key]
return new_output
@torch.no_grad()
def visualize(self, sample, output=None, shape=0, view=0, **kwargs):
width = int(sample['size'][shape, view][1].item())
img_id = '{}_{}'.format(sample['shape'][shape], sample['view'][shape, view])
if output is None:
assert self.cache is not None, "need to run forward-pass"
output = self.cache # make sure to run forward-pass.
sample.update(output['samples'])
images = {}
images = self._visualize(images, sample, output, [img_id, shape, view, width, 'render'])
images = self._visualize(images, sample, sample, [img_id, shape, view, width, 'target'])
if 'coarse' in output: # hierarchical sampling
images = self._visualize(images, sample, output['coarse'], [img_id, shape, view, width, 'coarse'])
images = {
tag: recover_image(width=width, **images[tag])
for tag in images if images[tag] is not None
}
return images
def _visualize(self, images, sample, output, state, **kwargs):
img_id, shape, view, width, name = state
if 'colors' in output and output['colors'] is not None:
images['{}_color/{}:HWC'.format(name, img_id)] ={
'img': output['colors'][shape, view],
'min_val': float(self.args.min_color)
}
if 'depths' in output and output['depths'] is not None:
min_depth, max_depth = output['depths'].min(), output['depths'].max()
if getattr(self.args, "near", None) is not None:
min_depth = self.args.near
max_depth = self.args.far
images['{}_depth/{}:HWC'.format(name, img_id)] = {
'img': output['depths'][shape, view],
'min_val': min_depth,
'max_val': max_depth}
normals = compute_normal_map(
sample['ray_start'][shape, view].float(),
sample['ray_dir'][shape, view].float(),
output['depths'][shape, view].float(),
sample['extrinsics'][shape, view].float().inverse(), width)
images['{}_normal/{}:HWC'.format(name, img_id)] = {
'img': normals, 'min_val': -1, 'max_val': 1}
# generate point clouds from depth
# images['{}_point/{}'.format(name, img_id)] = {
# 'img': torch.cat(
# [ray(sample['ray_start'][shape, view].float(),
# sample['ray_dir'][shape, view].float(),
# output['depths'][shape, view].unsqueeze(-1).float()),
# (output['colors'][shape, view] - self.args.min_color) / (1 - self.args.min_color)], 1), # XYZRGB
# 'raw': True }
if 'z' in output and output['z'] is not None:
images['{}_z/{}:HWC'.format(name, img_id)] = {
'img': output['z'][shape, view], 'min_val': 0, 'max_val': 1}
if 'normal' in output and output['normal'] is not None:
images['{}_predn/{}:HWC'.format(name, img_id)] = {
'img': output['normal'][shape, view], 'min_val': -1, 'max_val': 1}
return images
def add_eval_scores(self, logging_output, sample, output, criterion, scores=['ssim', 'psnr', 'lpips'], outdir=None):
predicts, targets = output['colors'], sample['colors']
ssims, psnrs, lpips, rmses = [], [], [], []
for s in range(predicts.size(0)):
for v in range(predicts.size(1)):
width = int(sample['size'][s, v][1])
p = recover_image(predicts[s, v], width=width, min_val=float(self.args.min_color))
t = recover_image(targets[s, v], width=width, min_val=float(self.args.min_color))
pn, tn = p.numpy(), t.numpy()
p, t = p.to(predicts.device), t.to(targets.device)
if 'ssim' in scores:
ssims += [skimage.metrics.structural_similarity(pn, tn, multichannel=True, data_range=1)]
if 'psnr' in scores:
psnrs += [skimage.metrics.peak_signal_noise_ratio(pn, tn, data_range=1)]
if 'lpips' in scores and hasattr(criterion, 'lpips'):
with torch.no_grad():
lpips += [criterion.lpips(
2 * p.unsqueeze(-1).permute(3,2,0,1) - 1,
2 * t.unsqueeze(-1).permute(3,2,0,1) - 1).item()]
if 'depths' in sample:
td = sample['depths'][sample['depths'] > 0]
pd = output['depths'][sample['depths'] > 0]
rmses += [torch.sqrt(((td - pd) ** 2).mean()).item()]
if outdir is not None:
def imsave(filename, image):
imageio.imsave(os.path.join(outdir, filename), (image * 255).astype('uint8'))
figname = '-{:03d}_{:03d}.png'.format(sample['id'][s], sample['view'][s, v])
imsave('output' + figname, pn)
imsave('target' + figname, tn)
imsave('normal' + figname, recover_image(compute_normal_map(
sample['ray_start'][s, v].float(), sample['ray_dir'][s, v].float(),
output['depths'][s, v].float(), sample['extrinsics'][s, v].float().inverse(), width=width),
min_val=-1, max_val=1, width=width).numpy())
if 'featn2' in output:
imsave('featn2' + figname, output['featn2'][s, v].cpu().numpy())
if 'voxel' in output:
imsave('voxel' + figname, output['voxel'][s, v].cpu().numpy())
if len(ssims) > 0:
logging_output['ssim_loss'] = np.mean(ssims)
if len(psnrs) > 0:
logging_output['psnr_loss'] = np.mean(psnrs)
if len(lpips) > 0:
logging_output['lpips_loss'] = np.mean(lpips)
if len(rmses) > 0:
logging_output['rmses_loss'] = np.mean(rmses)
def adjust(self, **kwargs):
raise NotImplementedError
@property
def text(self):
return "fairnr BaseModel"
@property
def unique_seed(self):
return self._num_updates * 137 + self.args.distributed_rank
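    # Mixing the update counter with the distributed rank gives every GPU a
    # different but deterministic seed for each step; it feeds the
    # with_torch_seed(...) contexts above so that workers sample different
    # rays/points while runs stay reproducible.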
| 14,302 | 41.19174 | 121 | py |
NSVF | NSVF-main/fairnr/models/nsvf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
import cv2, math, time
import numpy as np
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
register_model,
register_model_architecture
)
from fairseq.utils import item
from fairnr.data.geometry import compute_normal_map, fill_in
from fairnr.models.nerf import NeRFModel
@register_model('nsvf')
class NSVFModel(NeRFModel):
READER = 'image_reader'
ENCODER = 'sparsevoxel_encoder'
FIELD = 'radiance_field'
RAYMARCHER = 'volume_rendering'
@classmethod
def add_args(cls, parser):
super().add_args(parser)
parser.add_argument('--fine-num-sample-ratio', type=float, default=0,
                            help='ratio of samples compared to the first pass')
parser.add_argument('--inverse-distance-coarse-sampling', type=str,
choices=['none', 'camera', 'origin'], default='none',
help='if set, we do not sample points uniformly through voxels.')
def intersecting(self, ray_start, ray_dir, encoder_states, **kwargs):
S = ray_dir.size(0)
ray_start, ray_dir, intersection_outputs, hits, _ = \
super().intersecting(ray_start, ray_dir, encoder_states, **kwargs)
if self.reader.no_sampling and self.training: # sample points after ray-voxel intersection
uv, size = kwargs['uv'], kwargs['size']
mask = hits.reshape(*uv.size()[:2], uv.size(-1))
# sample rays based on voxel intersections
sampled_uv, sampled_masks = self.reader.sample_pixels(
uv, size, mask=mask, return_mask=True)
sampled_masks = sampled_masks.reshape(uv.size(0), -1).bool()
hits, sampled_masks = hits[sampled_masks].reshape(S, -1), sampled_masks.unsqueeze(-1)
intersection_outputs = {name: outs[sampled_masks.expand_as(outs)].reshape(S, -1, outs.size(-1))
for name, outs in intersection_outputs.items()}
ray_start = ray_start[sampled_masks.expand_as(ray_start)].reshape(S, -1, 3)
ray_dir = ray_dir[sampled_masks.expand_as(ray_dir)].reshape(S, -1, 3)
else:
sampled_uv = None
min_depth = intersection_outputs['min_depth']
max_depth = intersection_outputs['max_depth']
pts_idx = intersection_outputs['intersected_voxel_idx']
dists = (max_depth - min_depth).masked_fill(pts_idx.eq(-1), 0)
intersection_outputs['probs'] = dists / dists.sum(dim=-1, keepdim=True)
if getattr(self.args, "fixed_num_samples", 0) > 0:
intersection_outputs['steps'] = intersection_outputs['min_depth'].new_ones(
*intersection_outputs['min_depth'].size()[:-1], 1) * self.args.fixed_num_samples
else:
intersection_outputs['steps'] = dists.sum(-1) / self.encoder.step_size
return ray_start, ray_dir, intersection_outputs, hits, sampled_uv
def raymarching(self, ray_start, ray_dir, intersection_outputs, encoder_states, fine=False):
samples, all_results = super().raymarching(ray_start, ray_dir, intersection_outputs, encoder_states, fine)
all_results['voxel_edges'] = self.encoder.get_edge(ray_start, ray_dir, samples, encoder_states)
all_results['voxel_depth'] = samples['sampled_point_depth'][:, 0]
return samples, all_results
def prepare_hierarchical_sampling(self, intersection_outputs, samples, all_results):
intersection_outputs = super().prepare_hierarchical_sampling(intersection_outputs, samples, all_results)
if getattr(self.args, "fine_num_sample_ratio", 0) > 0:
intersection_outputs['steps'] = samples['sampled_point_voxel_idx'].ne(-1).sum(-1).float() * self.args.fine_num_sample_ratio
return intersection_outputs
def postprocessing(self, ray_start, ray_dir, all_results, hits, sizes):
        # NSVF needs fill_in to composite the background for rays that missed all voxels
S, V, P = sizes
fullsize = S * V * P
all_results['missed'] = fill_in((fullsize, ), hits, all_results['missed'], 1.0).view(S, V, P)
all_results['colors'] = fill_in((fullsize, 3), hits, all_results['colors'], 0.0).view(S, V, P, 3)
all_results['depths'] = fill_in((fullsize, ), hits, all_results['depths'], 0.0).view(S, V, P)
BG_DEPTH = self.field.bg_color.depth
bg_color = self.field.bg_color(all_results['colors'])
all_results['colors'] += all_results['missed'].unsqueeze(-1) * bg_color.reshape(fullsize, 3).view(S, V, P, 3)
all_results['depths'] += all_results['missed'] * BG_DEPTH
if 'normal' in all_results:
all_results['normal'] = fill_in((fullsize, 3), hits, all_results['normal'], 0.0).view(S, V, P, 3)
if 'voxel_depth' in all_results:
all_results['voxel_depth'] = fill_in((fullsize, ), hits, all_results['voxel_depth'], BG_DEPTH).view(S, V, P)
if 'voxel_edges' in all_results:
all_results['voxel_edges'] = fill_in((fullsize, 3), hits, all_results['voxel_edges'], 1.0).view(S, V, P, 3)
if 'feat_n2' in all_results:
all_results['feat_n2'] = fill_in((fullsize,), hits, all_results['feat_n2'], 0.0).view(S, V, P)
return all_results
def add_other_logs(self, all_results):
return {'voxs_log': item(self.encoder.voxel_size),
'stps_log': item(self.encoder.step_size),
'nvox_log': item(self.encoder.num_voxels)}
def _visualize(self, images, sample, output, state, **kwargs):
img_id, shape, view, width, name = state
images = super()._visualize(images, sample, output, state, **kwargs)
if 'voxel_edges' in output and output['voxel_edges'] is not None:
# voxel hitting visualization
images['{}_voxel/{}:HWC'.format(name, img_id)] = {
'img': output['voxel_edges'][shape, view].float(),
'min_val': 0,
'max_val': 1,
'weight':
compute_normal_map(
sample['ray_start'][shape, view].float(),
sample['ray_dir'][shape, view].float(),
output['voxel_depth'][shape, view].float(),
sample['extrinsics'][shape, view].float().inverse(),
width, proj=True)
}
if 'feat_n2' in output and output['feat_n2'] is not None:
images['{}_featn2/{}:HWC'.format(name, img_id)] = {
'img': output['feat_n2'][shape, view].float(),
'min_val': 0,
'max_val': 1
}
return images
@torch.no_grad()
def prune_voxels(self, th=0.5, train_stats=False):
self.encoder.pruning(self.field, th, train_stats=train_stats)
self.clean_caches()
@torch.no_grad()
def split_voxels(self):
logger.info("half the global voxel size {:.4f} -> {:.4f}".format(
self.encoder.voxel_size.item(), self.encoder.voxel_size.item() * .5))
self.encoder.splitting()
self.encoder.voxel_size *= .5
self.encoder.max_hits *= 1.5
self.clean_caches()
@torch.no_grad()
def reduce_stepsize(self):
logger.info("reduce the raymarching step size {:.4f} -> {:.4f}".format(
self.encoder.step_size.item(), self.encoder.step_size.item() * .5))
self.encoder.step_size *= .5
def clean_caches(self, reset=False):
self.encoder.clean_runtime_caches()
if reset:
self.encoder.reset_runtime_caches()
@register_model_architecture("nsvf", "nsvf_base")
def base_architecture(args):
    # parameters that typically need to be tuned per scene
args.voxel_size = getattr(args, "voxel_size", None)
args.max_hits = getattr(args, "max_hits", 60)
args.raymarching_stepsize = getattr(args, "raymarching_stepsize", 0.01)
args.raymarching_stepsize_ratio = getattr(args, "raymarching_stepsize_ratio", 0.0)
# encoder default parameter
args.voxel_embed_dim = getattr(args, "voxel_embed_dim", 32)
args.voxel_path = getattr(args, "voxel_path", None)
args.initial_boundingbox = getattr(args, "initial_boundingbox", None)
# field
args.inputs_to_density = getattr(args, "inputs_to_density", "emb:6:32")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4")
args.feature_embed_dim = getattr(args, "feature_embed_dim", 256)
args.density_embed_dim = getattr(args, "density_embed_dim", 128)
args.texture_embed_dim = getattr(args, "texture_embed_dim", 256)
# API Update: fix the number of layers
args.feature_layers = getattr(args, "feature_layers", 1)
args.texture_layers = getattr(args, "texture_layers", 3)
args.background_stop_gradient = getattr(args, "background_stop_gradient", False)
args.background_depth = getattr(args, "background_depth", 5.0)
# raymarcher
args.discrete_regularization = getattr(args, "discrete_regularization", False)
args.deterministic_step = getattr(args, "deterministic_step", False)
args.raymarching_tolerance = getattr(args, "raymarching_tolerance", 0)
args.use_octree = getattr(args, "use_octree", False)
# reader
args.pixel_per_view = getattr(args, "pixel_per_view", 2048)
args.sampling_on_mask = getattr(args, "sampling_on_mask", 0.0)
args.sampling_at_center = getattr(args, "sampling_at_center", 1.0)
args.sampling_on_bbox = getattr(args, "sampling_on_bbox", False)
args.sampling_patch_size = getattr(args, "sampling_patch_size", 1)
args.sampling_skipping_size = getattr(args, "sampling_skipping_size", 1)
# others
args.chunk_size = getattr(args, "chunk_size", 64)
args.valid_chunk_size = getattr(args, "valid_chunk_size", 64)
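# Illustrative sketch (added for clarity; not part of the original NSVF code):
# architecture functions only fill in attributes that are still missing, so any
# value coming from the command line wins over these defaults. A minimal,
# hedged example with a bare argparse.Namespace:
def _example_base_architecture():
    import argparse
    args = argparse.Namespace(voxel_embed_dim=64)  # pretend this came from the CLI
    base_architecture(args)
    assert args.voxel_embed_dim == 64      # user-provided value is kept
    assert args.feature_embed_dim == 256   # unset values fall back to the defaults above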
@register_model_architecture("nsvf", "nsvf_xyz")
def nerf2_architecture(args):
args.voxel_embed_dim = getattr(args, "voxel_embed_dim", 0)
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:10, ray:4")
base_architecture(args)
@register_model_architecture("nsvf", "nsvf_nerf")
def nerf_style_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4")
args.feature_layers = getattr(args, "feature_layers", 6)
args.texture_layers = getattr(args, "texture_layers", 0)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_nerf_nov")
def nerf_noview_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256")
nerf_style_architecture(args)
@register_model_architecture("nsvf", "nsvf_xyzn")
def nerf3_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:10, normal:4, ray:4")
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_xyz_nope")
def nerf3nope_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:0:3, sigma:0:1, ray:4")
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_xyzn_old")
def nerfold_architecture(args):
args.feature_layers = getattr(args, "feature_layers", 6)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, normal:0:3, sigma:0:1, ray:4")
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_xyzn_nope")
def nerf2nope_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:0:3, normal:0:3, sigma:0:1, ray:4")
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_xyzn_noz")
def nerf3noz_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "pos:10, normal:4, ray:4")
nerf2_architecture(args)
@register_model_architecture("nsvf", "nsvf_embn")
def nerf4_architecture(args):
args.inputs_to_density = getattr(args, "inputs_to_density", "emb:6:32")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, normal:4, ray:4")
base_architecture(args)
@register_model_architecture("nsvf", "nsvf_emb0")
def nerf5_architecture(args):
args.voxel_embed_dim = getattr(args, "voxel_embed_dim", 384)
args.inputs_to_density = getattr(args, "inputs_to_density", "emb:0:384")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4")
base_architecture(args)
@register_model('disco_nsvf')
class DiscoNSVFModel(NSVFModel):
FIELD = "disentangled_radiance_field"
@register_model_architecture("disco_nsvf", "disco_nsvf")
def disco_nsvf_architecture(args):
args.compressed_light_dim = getattr(args, "compressed_light_dim", 64)
nerf3_architecture(args)
@register_model('multi_disco_nsvf')
class mDiscoNSVFModel(NSVFModel):
ENCODER = "multi_sparsevoxel_encoder"
FIELD = "disentangled_radiance_field"
@register_model_architecture("multi_disco_nsvf", "multi_disco_nsvf")
def mdisco_nsvf_architecture(args):
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10, context:0:256")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:10, normal:4, ray:4, context:0:256")
disco_nsvf_architecture(args)
@register_model('sdf_nsvf')
class SDFNSVFModel(NSVFModel):
FIELD = "sdf_radiance_field"
@register_model_architecture("sdf_nsvf", "sdf_nsvf")
def sdf_nsvf_architecture(args):
args.feature_layers = getattr(args, "feature_layers", 6)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
nerf2nope_architecture(args)
@register_model('sdf_nsvf_sfx')
class SDFSFXNSVFModel(SDFNSVFModel):
FIELD = "sdf_radiance_field"
RAYMARCHER = "surface_volume_rendering"
@register_model_architecture("sdf_nsvf_sfx", "sdf_nsvf_sfx")
def sdf_nsvfsfx_architecture(args):
sdf_nsvf_architecture(args) | 14,499 | 43.072948 | 135 | py |
NSVF | NSVF-main/fairnr/models/nmf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
logger = logging.getLogger(__name__)
import torch
from fairseq.models import (
register_model,
register_model_architecture
)
from fairnr.models.nsvf import NSVFModel
@register_model('nmf')
class NMFModel(NSVFModel):
"""
Experimental code: Neural Mesh Field
"""
ENCODER = 'triangle_mesh_encoder'
@torch.no_grad()
def prune_voxels(self, *args, **kwargs):
pass
@torch.no_grad()
def split_voxels(self):
pass
# logger.info("half the global cage size {:.4f} -> {:.4f}".format(
# self.encoder.cage_size.item(), self.encoder.cage_size.item() * .5))
# self.encoder.cage_size *= .5
@register_model_architecture("nmf", "nmf_base")
def base_architecture(args):
    # parameters that typically need to be tuned per scene
args.max_hits = getattr(args, "max_hits", 60)
args.raymarching_stepsize = getattr(args, "raymarching_stepsize", 0.01)
# encoder default parameter
args.voxel_embed_dim = getattr(args, "voxel_embed_dim", 0)
args.voxel_path = getattr(args, "voxel_path", None)
# field
args.inputs_to_density = getattr(args, "inputs_to_density", "pos:10")
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, pos:10, ray:4")
args.feature_embed_dim = getattr(args, "feature_embed_dim", 256)
args.density_embed_dim = getattr(args, "density_embed_dim", 128)
args.texture_embed_dim = getattr(args, "texture_embed_dim", 256)
args.feature_layers = getattr(args, "feature_layers", 1)
args.texture_layers = getattr(args, "texture_layers", 3)
args.background_stop_gradient = getattr(args, "background_stop_gradient", False)
args.background_depth = getattr(args, "background_depth", 5.0)
# raymarcher
args.discrete_regularization = getattr(args, "discrete_regularization", False)
args.deterministic_step = getattr(args, "deterministic_step", False)
args.raymarching_tolerance = getattr(args, "raymarching_tolerance", 0)
# reader
args.pixel_per_view = getattr(args, "pixel_per_view", 2048)
args.sampling_on_mask = getattr(args, "sampling_on_mask", 0.0)
args.sampling_at_center = getattr(args, "sampling_at_center", 1.0)
args.sampling_on_bbox = getattr(args, "sampling_on_bbox", False)
args.sampling_patch_size = getattr(args, "sampling_patch_size", 1)
args.sampling_skipping_size = getattr(args, "sampling_skipping_size", 1)
# others
args.chunk_size = getattr(args, "chunk_size", 64)
@register_model_architecture("nmf", "nmf_nerf")
def nerf_style_architecture(args):
args.inputs_to_texture = getattr(args, "inputs_to_texture", "feat:0:256, ray:4")
args.feature_layers = getattr(args, "feature_layers", 6)
args.texture_layers = getattr(args, "texture_layers", 0)
args.feature_field_skip_connect = getattr(args, "feature_field_skip_connect", 3)
args.no_layernorm_mlp = getattr(args, "no_layernorm_mlp", True)
base_architecture(args) | 3,148 | 36.939759 | 92 | py |
NSVF | NSVF-main/fairnr/clib/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
''' Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch '''
from __future__ import (
division,
absolute_import,
with_statement,
print_function,
unicode_literals,
)
import os, sys
import torch
import torch.nn.functional as F
from torch.autograd import Function
import torch.nn as nn
import sys
import numpy as np
try:
import builtins
except:
import __builtin__ as builtins
try:
import fairnr.clib._ext as _ext
except ImportError:
pass
# raise ImportError(
# "Could not import _ext module.\n"
# "Please see the setup instructions in the README"
# )
MAX_DEPTH = 10000.0
class BallRayIntersect(Function):
@staticmethod
def forward(ctx, radius, n_max, points, ray_start, ray_dir):
inds, min_depth, max_depth = _ext.ball_intersect(
ray_start.float(), ray_dir.float(), points.float(), radius, n_max)
min_depth = min_depth.type_as(ray_start)
max_depth = max_depth.type_as(ray_start)
ctx.mark_non_differentiable(inds)
ctx.mark_non_differentiable(min_depth)
ctx.mark_non_differentiable(max_depth)
return inds, min_depth, max_depth
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None
ball_ray_intersect = BallRayIntersect.apply
class AABBRayIntersect(Function):
@staticmethod
def forward(ctx, voxelsize, n_max, points, ray_start, ray_dir):
# HACK: speed-up ray-voxel intersection by batching...
G = min(2048, int(2 * 10 ** 9 / points.numel())) # HACK: avoid out-of-memory
S, N = ray_start.shape[:2]
K = int(np.ceil(N / G))
H = K * G
if H > N:
ray_start = torch.cat([ray_start, ray_start[:, :H-N]], 1)
ray_dir = torch.cat([ray_dir, ray_dir[:, :H-N]], 1)
ray_start = ray_start.reshape(S * G, K, 3)
ray_dir = ray_dir.reshape(S * G, K, 3)
points = points.expand(S * G, *points.size()[1:]).contiguous()
inds, min_depth, max_depth = _ext.aabb_intersect(
ray_start.float(), ray_dir.float(), points.float(), voxelsize, n_max)
min_depth = min_depth.type_as(ray_start)
max_depth = max_depth.type_as(ray_start)
inds = inds.reshape(S, H, -1)
min_depth = min_depth.reshape(S, H, -1)
max_depth = max_depth.reshape(S, H, -1)
if H > N:
inds = inds[:, :N]
min_depth = min_depth[:, :N]
max_depth = max_depth[:, :N]
ctx.mark_non_differentiable(inds)
ctx.mark_non_differentiable(min_depth)
ctx.mark_non_differentiable(max_depth)
return inds, min_depth, max_depth
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None
aabb_ray_intersect = AABBRayIntersect.apply
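# Illustrative sketch (added for clarity; not part of the original NSVF code):
# a hedged example of how the AABB intersection op is typically called. It
# requires the compiled `_ext` CUDA extension and GPU tensors, so it is only
# wrapped in a function here and never executed on import. Shapes follow
# forward() above: rays are (S, N, 3) and voxel centers are (1, M, 3).
def _example_aabb_ray_intersect(ray_start, ray_dir, voxel_centers, voxel_size=0.25, n_max=60):
    # per ray: up to n_max intersected voxel ids (padded with -1) together with
    # the entry and exit depths of each intersected voxel along the ray
    pts_idx, min_depth, max_depth = aabb_ray_intersect(
        voxel_size, n_max, voxel_centers, ray_start, ray_dir)
    hits = pts_idx.ne(-1).any(-1)  # (S, N) mask of rays that hit any voxel
    return pts_idx, min_depth, max_depth, hits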
class SparseVoxelOctreeRayIntersect(Function):
@staticmethod
def forward(ctx, voxelsize, n_max, points, children, ray_start, ray_dir):
G = min(2048, int(2 * 10 ** 9 / (points.numel() + children.numel()))) # HACK: avoid out-of-memory
S, N = ray_start.shape[:2]
K = int(np.ceil(N / G))
H = K * G
if H > N:
ray_start = torch.cat([ray_start, ray_start[:, :H-N]], 1)
ray_dir = torch.cat([ray_dir, ray_dir[:, :H-N]], 1)
ray_start = ray_start.reshape(S * G, K, 3)
ray_dir = ray_dir.reshape(S * G, K, 3)
points = points.expand(S * G, *points.size()[1:]).contiguous()
children = children.expand(S * G, *children.size()[1:]).contiguous()
inds, min_depth, max_depth = _ext.svo_intersect(
ray_start.float(), ray_dir.float(), points.float(), children.int(), voxelsize, n_max)
min_depth = min_depth.type_as(ray_start)
max_depth = max_depth.type_as(ray_start)
inds = inds.reshape(S, H, -1)
min_depth = min_depth.reshape(S, H, -1)
max_depth = max_depth.reshape(S, H, -1)
if H > N:
inds = inds[:, :N]
min_depth = min_depth[:, :N]
max_depth = max_depth[:, :N]
ctx.mark_non_differentiable(inds)
ctx.mark_non_differentiable(min_depth)
ctx.mark_non_differentiable(max_depth)
return inds, min_depth, max_depth
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None
svo_ray_intersect = SparseVoxelOctreeRayIntersect.apply
class TriangleRayIntersect(Function):
@staticmethod
def forward(ctx, cagesize, blur_ratio, n_max, points, faces, ray_start, ray_dir):
# HACK: speed-up ray-voxel intersection by batching...
G = min(2048, int(2 * 10 ** 9 / (3 * faces.numel()))) # HACK: avoid out-of-memory
S, N = ray_start.shape[:2]
K = int(np.ceil(N / G))
H = K * G
if H > N:
ray_start = torch.cat([ray_start, ray_start[:, :H-N]], 1)
ray_dir = torch.cat([ray_dir, ray_dir[:, :H-N]], 1)
ray_start = ray_start.reshape(S * G, K, 3)
ray_dir = ray_dir.reshape(S * G, K, 3)
face_points = F.embedding(faces.reshape(-1, 3), points.reshape(-1, 3))
face_points = face_points.unsqueeze(0).expand(S * G, *face_points.size()).contiguous()
inds, depth, uv = _ext.triangle_intersect(
ray_start.float(), ray_dir.float(), face_points.float(), cagesize, blur_ratio, n_max)
depth = depth.type_as(ray_start)
uv = uv.type_as(ray_start)
inds = inds.reshape(S, H, -1)
depth = depth.reshape(S, H, -1, 3)
uv = uv.reshape(S, H, -1)
if H > N:
inds = inds[:, :N]
depth = depth[:, :N]
uv = uv[:, :N]
ctx.mark_non_differentiable(inds)
ctx.mark_non_differentiable(depth)
ctx.mark_non_differentiable(uv)
return inds, depth, uv
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None, None
triangle_ray_intersect = TriangleRayIntersect.apply
class UniformRaySampling(Function):
@staticmethod
def forward(ctx, pts_idx, min_depth, max_depth, step_size, max_ray_length, deterministic=False):
G, N, P = 256, pts_idx.size(0), pts_idx.size(1)
H = int(np.ceil(N / G)) * G
if H > N:
pts_idx = torch.cat([pts_idx, pts_idx[:H-N]], 0)
min_depth = torch.cat([min_depth, min_depth[:H-N]], 0)
max_depth = torch.cat([max_depth, max_depth[:H-N]], 0)
pts_idx = pts_idx.reshape(G, -1, P)
min_depth = min_depth.reshape(G, -1, P)
max_depth = max_depth.reshape(G, -1, P)
# pre-generate noise
max_steps = int(max_ray_length / step_size)
max_steps = max_steps + min_depth.size(-1) * 2
noise = min_depth.new_zeros(*min_depth.size()[:-1], max_steps)
if deterministic:
noise += 0.5
else:
noise = noise.uniform_()
# call cuda function
sampled_idx, sampled_depth, sampled_dists = _ext.uniform_ray_sampling(
pts_idx, min_depth.float(), max_depth.float(), noise.float(), step_size, max_steps)
sampled_depth = sampled_depth.type_as(min_depth)
sampled_dists = sampled_dists.type_as(min_depth)
sampled_idx = sampled_idx.reshape(H, -1)
sampled_depth = sampled_depth.reshape(H, -1)
sampled_dists = sampled_dists.reshape(H, -1)
if H > N:
sampled_idx = sampled_idx[: N]
sampled_depth = sampled_depth[: N]
sampled_dists = sampled_dists[: N]
max_len = sampled_idx.ne(-1).sum(-1).max()
sampled_idx = sampled_idx[:, :max_len]
sampled_depth = sampled_depth[:, :max_len]
sampled_dists = sampled_dists[:, :max_len]
ctx.mark_non_differentiable(sampled_idx)
ctx.mark_non_differentiable(sampled_depth)
ctx.mark_non_differentiable(sampled_dists)
return sampled_idx, sampled_depth, sampled_dists
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None, None
uniform_ray_sampling = UniformRaySampling.apply
class InverseCDFRaySampling(Function):
@staticmethod
def forward(ctx, pts_idx, min_depth, max_depth, probs, steps, fixed_step_size=-1, deterministic=False):
G, N, P = 200, pts_idx.size(0), pts_idx.size(1)
H = int(np.ceil(N / G)) * G
if H > N:
pts_idx = torch.cat([pts_idx, pts_idx[:1].expand(H-N, P)], 0)
min_depth = torch.cat([min_depth, min_depth[:1].expand(H-N, P)], 0)
max_depth = torch.cat([max_depth, max_depth[:1].expand(H-N, P)], 0)
probs = torch.cat([probs, probs[:1].expand(H-N, P)], 0)
steps = torch.cat([steps, steps[:1].expand(H-N)], 0)
# print(G, P, np.ceil(N / G), N, H, pts_idx.shape, min_depth.device)
pts_idx = pts_idx.reshape(G, -1, P)
min_depth = min_depth.reshape(G, -1, P)
max_depth = max_depth.reshape(G, -1, P)
probs = probs.reshape(G, -1, P)
steps = steps.reshape(G, -1)
# pre-generate noise
max_steps = steps.ceil().long().max() + P
noise = min_depth.new_zeros(*min_depth.size()[:-1], max_steps)
if deterministic:
noise += 0.5
else:
            noise = noise.uniform_().clamp(min=0.001, max=0.999)  # avoid sampling exactly 0 or 1, which breaks the inverse CDF
# call cuda function
        chunk_size = 4 * G  # process rays in chunks to avoid running out of memory
results = [
_ext.inverse_cdf_sampling(
pts_idx[:, i:i+chunk_size].contiguous(),
min_depth.float()[:, i:i+chunk_size].contiguous(),
max_depth.float()[:, i:i+chunk_size].contiguous(),
noise.float()[:, i:i+chunk_size].contiguous(),
probs.float()[:, i:i+chunk_size].contiguous(),
steps.float()[:, i:i+chunk_size].contiguous(),
fixed_step_size)
for i in range(0, min_depth.size(1), chunk_size)
]
sampled_idx, sampled_depth, sampled_dists = [
torch.cat([r[i] for r in results], 1)
for i in range(3)
]
sampled_depth = sampled_depth.type_as(min_depth)
sampled_dists = sampled_dists.type_as(min_depth)
sampled_idx = sampled_idx.reshape(H, -1)
sampled_depth = sampled_depth.reshape(H, -1)
sampled_dists = sampled_dists.reshape(H, -1)
if H > N:
sampled_idx = sampled_idx[: N]
sampled_depth = sampled_depth[: N]
sampled_dists = sampled_dists[: N]
max_len = sampled_idx.ne(-1).sum(-1).max()
sampled_idx = sampled_idx[:, :max_len]
sampled_depth = sampled_depth[:, :max_len]
sampled_dists = sampled_dists[:, :max_len]
ctx.mark_non_differentiable(sampled_idx)
ctx.mark_non_differentiable(sampled_depth)
ctx.mark_non_differentiable(sampled_dists)
return sampled_idx, sampled_depth, sampled_dists
@staticmethod
def backward(ctx, a, b, c):
return None, None, None, None, None, None, None
inverse_cdf_sampling = InverseCDFRaySampling.apply
# pure-PyTorch fallback for ray point sampling
@torch.no_grad()
def _parallel_ray_sampling(MARCH_SIZE, pts_idx, min_depth, max_depth, deterministic=False):
# uniform sampling
_min_depth = min_depth.min(1)[0]
_max_depth = max_depth.masked_fill(max_depth.eq(MAX_DEPTH), 0).max(1)[0]
max_ray_length = (_max_depth - _min_depth).max()
delta = torch.arange(int(max_ray_length / MARCH_SIZE), device=min_depth.device, dtype=min_depth.dtype)
delta = delta[None, :].expand(min_depth.size(0), delta.size(-1))
if deterministic:
delta = delta + 0.5
else:
delta = delta + delta.clone().uniform_().clamp(min=0.01, max=0.99)
delta = delta * MARCH_SIZE
sampled_depth = min_depth[:, :1] + delta
sampled_idx = (sampled_depth[:, :, None] >= min_depth[:, None, :]).sum(-1) - 1
sampled_idx = pts_idx.gather(1, sampled_idx)
# include all boundary points
sampled_depth = torch.cat([min_depth, max_depth, sampled_depth], -1)
sampled_idx = torch.cat([pts_idx, pts_idx, sampled_idx], -1)
# reorder
sampled_depth, ordered_index = sampled_depth.sort(-1)
sampled_idx = sampled_idx.gather(1, ordered_index)
sampled_dists = sampled_depth[:, 1:] - sampled_depth[:, :-1] # distances
sampled_depth = .5 * (sampled_depth[:, 1:] + sampled_depth[:, :-1]) # mid-points
# remove all invalid depths
min_ids = (sampled_depth[:, :, None] >= min_depth[:, None, :]).sum(-1) - 1
max_ids = (sampled_depth[:, :, None] >= max_depth[:, None, :]).sum(-1)
sampled_depth.masked_fill_(
(max_ids.ne(min_ids)) |
(sampled_depth > _max_depth[:, None]) |
(sampled_dists == 0.0)
, MAX_DEPTH)
sampled_depth, ordered_index = sampled_depth.sort(-1) # sort again
sampled_masks = sampled_depth.eq(MAX_DEPTH)
num_max_steps = (~sampled_masks).sum(-1).max()
sampled_depth = sampled_depth[:, :num_max_steps]
sampled_dists = sampled_dists.gather(1, ordered_index).masked_fill_(sampled_masks, 0.0)[:, :num_max_steps]
sampled_idx = sampled_idx.gather(1, ordered_index).masked_fill_(sampled_masks, -1)[:, :num_max_steps]
return sampled_idx, sampled_depth, sampled_dists
@torch.no_grad()
def parallel_ray_sampling(MARCH_SIZE, pts_idx, min_depth, max_depth, deterministic=False):
chunk_size=4096
full_size = min_depth.shape[0]
if full_size <= chunk_size:
return _parallel_ray_sampling(MARCH_SIZE, pts_idx, min_depth, max_depth, deterministic=deterministic)
outputs = zip(*[
_parallel_ray_sampling(
MARCH_SIZE,
pts_idx[i:i+chunk_size], min_depth[i:i+chunk_size], max_depth[i:i+chunk_size],
deterministic=deterministic)
for i in range(0, full_size, chunk_size)])
sampled_idx, sampled_depth, sampled_dists = outputs
def padding_points(xs, pad):
if len(xs) == 1:
return xs[0]
maxlen = max([x.size(1) for x in xs])
full_size = sum([x.size(0) for x in xs])
xt = xs[0].new_ones(full_size, maxlen).fill_(pad)
st = 0
for i in range(len(xs)):
xt[st: st + xs[i].size(0), :xs[i].size(1)] = xs[i]
st += xs[i].size(0)
return xt
sampled_idx = padding_points(sampled_idx, -1)
sampled_depth = padding_points(sampled_depth, MAX_DEPTH)
sampled_dists = padding_points(sampled_dists, 0.0)
return sampled_idx, sampled_depth, sampled_dists
| 14,842 | 37.553247 | 110 | py |
NSVF | NSVF-main/fairnr/data/data_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import functools
import cv2
import math
import numpy as np
import imageio
from glob import glob
import os
import copy
import shutil
import skimage.metrics
import pandas as pd
import pylab as plt
import fairseq.distributed_utils as du
from plyfile import PlyData, PlyElement
from fairseq.meters import StopwatchMeter
def get_rank():
try:
return du.get_rank()
except AssertionError:
return 0
def get_world_size():
try:
return du.get_world_size()
except AssertionError:
return 1
def parse_views(view_args):
output = []
try:
xx = view_args.split(':')
ids = xx[0].split(',')
for id in ids:
if '..' in id:
a, b = id.split('..')
output += list(range(int(a), int(b)))
else:
output += [int(id)]
if len(xx) > 1:
output = output[::int(xx[-1])]
except Exception as e:
raise Exception("parse view args error: {}".format(e))
return output
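# Illustrative sketch (added for clarity; not part of the original NSVF code):
# parse_views accepts a comma-separated list of view ids where "a..b" expands
# to range(a, b), with an optional ":k" suffix that keeps every k-th entry of
# the expanded list.
def _example_parse_views():
    assert parse_views("0,2,4..8") == [0, 2, 4, 5, 6, 7]
    assert parse_views("0..10:2") == [0, 2, 4, 6, 8]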
def get_uv(H, W, h, w):
"""
    H, W: size of the real image (the one the intrinsics refer to)
    h, w: size of the resized image
"""
uv = np.flip(np.mgrid[0: h, 0: w], axis=0).astype(np.float32)
uv[0] = uv[0] * float(W / w)
uv[1] = uv[1] * float(H / h)
return uv, [float(H / h), float(W / w)]
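# Illustrative sketch (added for clarity; not part of the original NSVF code):
# get_uv builds a (2, h, w) grid of pixel coordinates for the resized image,
# expressed in the coordinate frame of the original H x W image.
def _example_get_uv():
    uv, ratio = get_uv(H=800, W=800, h=400, w=400)
    assert uv.shape == (2, 400, 400)
    assert ratio == [2.0, 2.0]  # [H / h, W / w]
    # uv[0] holds x (column) coordinates, uv[1] holds y (row) coordinates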
def load_rgb(
path,
resolution=None,
with_alpha=True,
bg_color=[1.0, 1.0, 1.0],
min_rgb=-1,
interpolation='AREA'):
if with_alpha:
img = imageio.imread(path) # RGB-ALPHA
else:
img = imageio.imread(path)[:, :, :3]
img = skimage.img_as_float32(img).astype('float32')
H, W, D = img.shape
h, w = resolution
if D == 3:
img = np.concatenate([img, np.ones((img.shape[0], img.shape[1], 1))], -1).astype('float32')
uv, ratio = get_uv(H, W, h, w)
if (h < H) or (w < W):
# img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA).astype('float32')
if min_rgb == -1: # 0, 1 --> -1, 1
img[:, :, :3] -= 0.5
img[:, :, :3] *= 2.
img[:, :, :3] = img[:, :, :3] * img[:, :, 3:] + np.asarray(bg_color)[None, None, :] * (1 - img[:, :, 3:])
img[:, :, 3] = img[:, :, 3] * (img[:, :, :3] != np.asarray(bg_color)[None, None, :]).any(-1)
img = img.transpose(2, 0, 1)
return img, uv, ratio
def load_depth(path, resolution=None, depth_plane=5):
if path is None:
return None
img = cv2.imread(path, cv2.IMREAD_UNCHANGED).astype(np.float32)
# ret, img = cv2.threshold(img, depth_plane, depth_plane, cv2.THRESH_TRUNC)
H, W = img.shape[:2]
h, w = resolution
if (h < H) or (w < W):
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
#img = cv2.resize(img, (w, h), interpolation=cv2.INTER_LINEAR)
if len(img.shape) ==3:
img = img[:,:,:1]
img = img.transpose(2,0,1)
else:
img = img[None,:,:]
return img
def load_mask(path, resolution=None):
if path is None:
return None
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE).astype(np.float32)
h, w = resolution
H, W = img.shape[:2]
if (h < H) or (w < W):
img = cv2.resize(img, (w, h), interpolation=cv2.INTER_NEAREST).astype('float32')
img = img / (img.max() + 1e-7)
return img
def load_matrix(path):
lines = [[float(w) for w in line.strip().split()] for line in open(path)]
if len(lines[0]) == 2:
lines = lines[1:]
if len(lines[-1]) == 2:
lines = lines[:-1]
return np.array(lines).astype(np.float32)
def load_intrinsics(filepath, resized_width=None, invert_y=False):
try:
intrinsics = load_matrix(filepath)
if intrinsics.shape[0] == 3 and intrinsics.shape[1] == 3:
_intrinsics = np.zeros((4, 4), np.float32)
_intrinsics[:3, :3] = intrinsics
_intrinsics[3, 3] = 1
intrinsics = _intrinsics
if intrinsics.shape[0] == 1 and intrinsics.shape[1] == 16:
intrinsics = intrinsics.reshape(4, 4)
return intrinsics
except ValueError:
pass
# Get camera intrinsics
with open(filepath, 'r') as file:
f, cx, cy, _ = map(float, file.readline().split())
fx = f
if invert_y:
fy = -f
else:
fy = f
# Build the intrinsic matrices
full_intrinsic = np.array([[fx, 0., cx, 0.],
[0., fy, cy, 0],
[0., 0, 1, 0],
[0, 0, 0, 1]])
return full_intrinsic
def unflatten_img(img, width=512):
sizes = img.size()
height = sizes[-1] // width
return img.reshape(*sizes[:-1], height, width)
def square_crop_img(img):
if img.shape[0] == img.shape[1]:
return img # already square
min_dim = np.amin(img.shape[:2])
center_coord = np.array(img.shape[:2]) // 2
img = img[center_coord[0] - min_dim // 2:center_coord[0] + min_dim // 2,
center_coord[1] - min_dim // 2:center_coord[1] + min_dim // 2]
return img
def sample_pixel_from_image(
num_pixel, num_sample,
mask=None, ratio=1.0,
use_bbox=False,
center_ratio=1.0,
width=512,
patch_size=1):
if patch_size > 1:
assert (num_pixel % (patch_size * patch_size) == 0) \
and (num_sample % (patch_size * patch_size) == 0), "size must match"
_num_pixel = num_pixel // (patch_size * patch_size)
_num_sample = num_sample // (patch_size * patch_size)
height = num_pixel // width
_mask = None if mask is None else \
mask.reshape(height, width).reshape(
height//patch_size, patch_size, width//patch_size, patch_size
).any(1).any(-1).reshape(-1)
_width = width // patch_size
_out = sample_pixel_from_image(_num_pixel, _num_sample, _mask, ratio, use_bbox, _width)
_x, _y = _out % _width, _out // _width
x, y = _x * patch_size, _y * patch_size
x = x[:, None, None] + np.arange(patch_size)[None, :, None]
y = y[:, None, None] + np.arange(patch_size)[None, None, :]
out = x + y * width
return out.reshape(-1)
if center_ratio < 1.0:
r = (1 - center_ratio) / 2.0
H, W = num_pixel // width, width
mask0 = np.zeros((H, W))
mask0[int(H * r): H - int(H * r), int(W * r): W - int(W * r)] = 1
mask0 = mask0.reshape(-1)
if mask is None:
mask = mask0
else:
mask = mask * mask0
if mask is not None:
mask = (mask > 0.0).astype('float32')
if (mask is None) or \
(ratio <= 0.0) or \
(mask.sum() == 0) or \
((1 - mask).sum() == 0):
return np.random.choice(num_pixel, num_sample)
if use_bbox:
mask = mask.reshape(-1, width)
x, y = np.where(mask == 1)
mask = np.zeros_like(mask)
mask[x.min(): x.max()+1, y.min(): y.max()+1] = 1.0
mask = mask.reshape(-1)
try:
probs = mask * ratio / (mask.sum()) + (1 - mask) / (num_pixel - mask.sum()) * (1 - ratio)
# x = np.random.choice(num_pixel, num_sample, True, p=probs)
return np.random.choice(num_pixel, num_sample, True, p=probs)
except Exception:
return np.random.choice(num_pixel, num_sample)
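# Illustrative sketch (added for clarity; not part of the original NSVF code):
# draw a fixed number of pixel indices from a flattened H x W image, requesting
# that most samples fall on a foreground mask.
def _example_sample_pixel_from_image():
    H, W, num_sample = 64, 64, 256
    mask = np.zeros(H * W, dtype='float32')
    mask[:H * W // 2] = 1.0  # pretend the top half of the image is foreground
    index = sample_pixel_from_image(H * W, num_sample, mask=mask, ratio=0.9, width=W)
    assert index.shape == (num_sample,) and index.max() < H * W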
def colormap(dz):
return plt.cm.jet(dz)
# return plt.cm.viridis(dz)
# return plt.cm.gray(dz)
def recover_image(img, min_val=-1, max_val=1, width=512, bg=None, weight=None, raw=False):
if raw: return img
sizes = img.size()
height = sizes[0] // width
img = img.float().to('cpu')
if len(sizes) == 1 and (bg is not None):
bg_mask = img.eq(bg)[:, None].type_as(img)
img = ((img - min_val) / (max_val - min_val)).clamp(min=0, max=1)
if len(sizes) == 1:
img = torch.from_numpy(colormap(img.numpy())[:, :3])
if weight is not None:
weight = weight.float().to('cpu')
img = img * weight[:, None]
if bg is not None:
img = img * (1 - bg_mask) + bg_mask
img = img.reshape(height, width, -1)
return img
def write_images(writer, images, updates):
for tag in images:
img = images[tag]
tag, dataform = tag.split(':')
writer.add_image(tag, img, updates, dataformats=dataform)
def compute_psnr(p, t):
"""Compute PSNR of model image predictions.
:param prediction: Return value of forward pass.
:param ground_truth: Ground truth.
:return: (psnr, ssim): tuple of floats
"""
ssim = skimage.metrics.structural_similarity(p, t, multichannel=True, data_range=1)
psnr = skimage.metrics.peak_signal_noise_ratio(p, t, data_range=1)
return ssim, psnr
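# Illustrative sketch (added for clarity; not part of the original NSVF code):
# compute_psnr expects numpy images in [0, 1] and, despite its name, returns
# the pair (ssim, psnr).
def _example_compute_psnr():
    rng = np.random.RandomState(0)
    target = rng.rand(32, 32, 3).astype('float32')
    pred = np.clip(target + 0.01 * rng.randn(32, 32, 3), 0, 1).astype('float32')
    ssim, psnr = compute_psnr(pred, target)
    assert ssim > 0.9 and psnr > 30.0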
def save_point_cloud(filename, xyz, rgb=None):
if rgb is None:
vertex = np.array([(xyz[k, 0], xyz[k, 1], xyz[k, 2]) for k in range(xyz.shape[0])],
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
else:
vertex = np.array([(xyz[k, 0], xyz[k, 1], xyz[k, 2], rgb[k, 0], rgb[k, 1], rgb[k, 2]) for k in range(xyz.shape[0])],
dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
# PlyData([PlyElement.describe(vertex, 'vertex')], text=True).write(filename)
# from fairseq import pdb; pdb.set_trace()
PlyData([PlyElement.describe(vertex, 'vertex')]).write(open(filename, 'wb'))
class InfIndex(object):
def __init__(self, index_list, shuffle=False):
self.index_list = index_list
self.size = len(index_list)
self.shuffle = shuffle
self.reset_permutation()
def reset_permutation(self):
if self.shuffle:
self._perm = np.random.permutation(self.index_list).tolist()
else:
self._perm = copy.deepcopy(self.index_list)
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return self.size
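# Illustrative sketch (added for clarity; not part of the original NSVF code):
# InfIndex cycles forever over an index list, popping from the end of the
# current permutation and reshuffling after every full pass when shuffle=True.
def _example_inf_index():
    it = InfIndex([0, 1, 2], shuffle=False)
    assert [next(it) for _ in range(6)] == [2, 1, 0, 2, 1, 0]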
class Timer(StopwatchMeter):
def __enter__(self):
"""Start a new timer as a context manager"""
self.start()
return self
def __exit__(self, *exc_info):
"""Stop the context manager timer"""
self.stop()
class GPUTimer(object):
def __enter__(self):
"""Start a new timer as a context manager"""
self.start = torch.cuda.Event(enable_timing=True)
self.end = torch.cuda.Event(enable_timing=True)
self.start.record()
self.sum = 0
return self
def __exit__(self, *exc_info):
"""Stop the context manager timer"""
self.end.record()
torch.cuda.synchronize()
self.sum = self.start.elapsed_time(self.end) / 1000.
| 11,063 | 28.902703 | 125 | py |
NSVF | NSVF-main/fairnr/data/shape_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os, glob
import copy
import numpy as np
import torch
import logging
from collections import defaultdict
from fairseq.data import FairseqDataset, BaseWrapperDataset
from . import data_utils, geometry, trajectory
logger = logging.getLogger(__name__)
class ShapeDataset(FairseqDataset):
"""
A dataset that only returns data per shape
"""
def __init__(self,
paths,
preload=True,
repeat=1,
subsample_valid=-1,
ids=None):
if os.path.isdir(paths):
self.paths = [paths]
else:
self.paths = [line.strip() for line in open(paths)]
self.subsample_valid = subsample_valid
self.total_num_shape = len(self.paths)
self.cache = None
self.repeat = repeat
# -- load per-shape data
_data_per_shape = {}
_data_per_shape['shape'] = list(range(len(self.paths)))
_ixts = self.find_intrinsics()
_glbs = self.find_global()
if len(_ixts) > 0:
_data_per_shape['ixt'] = _ixts
if len(_glbs) > 0:
_data_per_shape['glb'] = _glbs
if self.subsample_valid > -1:
for key in _data_per_shape:
_data_per_shape[key] = _data_per_shape[key][::self.subsample_valid]
self.paths = self.paths[::self.subsample_valid]
self.total_num_shape = len(self.paths)
# group the data..
data_list = []
for r in range(repeat):
# HACK: making several copies to enable multi-GPU usage.
if r == 0 and preload:
self.cache = []
logger.info('pre-load the dataset into memory.')
for id in range(self.total_num_shape):
element = {}
for key in _data_per_shape:
element[key] = _data_per_shape[key][id]
data_list.append(element)
if r == 0 and preload:
self.cache += [self._load_batch(data_list, id)]
# group the data together
self.data = data_list
def find_intrinsics(self):
ixt_list = []
for path in self.paths:
if os.path.exists(path + '/intrinsic.txt'):
ixt_list.append(path + '/intrinsic.txt')
elif os.path.exists(path + '/intrinsics.txt'):
ixt_list.append(path + '/intrinsics.txt')
return ixt_list
def find_global(self):
glb_list = []
for path in self.paths:
if os.path.exists(path + '/global.txt'):
glb_list.append(path + '/global.txt')
return glb_list
def _load_shape(self, packed_data):
intrinsics = data_utils.load_intrinsics(packed_data['ixt']).astype('float32') \
if packed_data.get('ixt', None) is not None else None
shape_id = packed_data['shape']
shape_data = {'intrinsics': intrinsics, 'id': shape_id}
if packed_data.get('glb', None) is not None: # additional global feature (if any)
shape_data['global_index'] = np.loadtxt(packed_data['glb']).astype('int64')
return shape_data
def _load_batch(self, data, index):
return index, self._load_shape(data[index])
def __getitem__(self, index):
if self.cache is not None:
return self.cache[index % self.total_num_shape][0], \
self.cache[index % self.total_num_shape][1]
return self._load_batch(self.data, index)
def __len__(self):
return len(self.data)
def num_tokens(self, index):
return 1
def _collater(self, samples):
results = {}
results['shape'] = torch.from_numpy(np.array([s[0] for s in samples]))
for key in samples[0][1]:
if samples[0][1][key] is not None:
results[key] = torch.from_numpy(
np.array([s[1][key] for s in samples]))
else:
results[key] = None
return results
def collater(self, samples):
try:
results = self._collater(samples)
except IndexError:
results = None
return results
class ShapeViewDataset(ShapeDataset):
"""
    A dataset containing a series of images rendered offline for an object.
"""
def __init__(self,
paths,
views,
num_view,
subsample_valid=-1,
resolution=None,
load_depth=False,
load_mask=False,
train=True,
preload=True,
repeat=1,
binarize=True,
bg_color="1,1,1",
min_color=-1,
ids=None):
super().__init__(paths, False, repeat, subsample_valid, ids)
self.train = train
self.load_depth = load_depth
self.load_mask = load_mask
self.views = views
self.num_view = num_view
if isinstance(resolution, str):
self.resolution = [int(r) for r in resolution.split('x')]
else:
self.resolution = [resolution, resolution]
self.world2camera = True
self.cache_view = None
bg_color = [float(b) for b in bg_color.split(',')] \
if isinstance(bg_color, str) else [bg_color]
if min_color == -1:
bg_color = [b * 2 - 1 for b in bg_color]
if len(bg_color) == 1:
bg_color = bg_color + bg_color + bg_color
self.bg_color = bg_color
self.min_color = min_color
        self.apply_mask_color = (self.bg_color[0] >= -1) & (self.bg_color[0] <= 1)  # whether to composite the background color using the mask
# -- load per-view data
_data_per_view = {}
_data_per_view['rgb'] = self.find_rgb()
_data_per_view['ext'] = self.find_extrinsics()
if self.find_intrinsics_per_view() is not None:
_data_per_view['ixt_v'] = self.find_intrinsics_per_view()
if self.load_depth:
_data_per_view['dep'] = self.find_depth()
if self.load_mask:
_data_per_view['mask'] = self.find_mask()
_data_per_view['view'] = self.summary_view_data(_data_per_view)
# group the data.
_index = 0
for r in range(repeat):
# HACK: making several copies to enable multi-GPU usage.
if r == 0 and preload:
self.cache = []
logger.info('pre-load the dataset into memory.')
for id in range(self.total_num_shape):
element = {}
total_num_view = len(_data_per_view['rgb'][id])
perm_ids = np.random.permutation(total_num_view) if train else np.arange(total_num_view)
for key in _data_per_view:
element[key] = [_data_per_view[key][id][i] for i in perm_ids]
self.data[_index].update(element)
if r == 0 and preload:
phase_name = f"{'train' if self.train else 'valid'}" + \
f".{self.resolution[0]}x{self.resolution[1]}" + \
f"{'.d' if load_depth else ''}" + \
f"{'.m' if load_mask else ''}" + \
f"{'b' if not self.apply_mask_color else ''}" + \
"_full"
logger.info("preload {}-{}".format(id, phase_name))
if binarize:
cache = self._load_binary(id, np.arange(total_num_view), phase_name)
else:
cache = self._load_batch(self.data, id, np.arange(total_num_view))
self.cache += [cache]
_index += 1
# group the data together
self.data_index = []
for i, d in enumerate(self.data):
if self.train:
index_list = list(range(len(d['rgb'])))
self.data_index.append(
data_utils.InfIndex(index_list, shuffle=True)
)
else:
copy_id = i // self.total_num_shape
index_list = []
for j in range(copy_id * num_view, copy_id * num_view + num_view):
index_list.append(j % len(d['rgb']))
self.data_index.append(
data_utils.InfIndex(index_list, shuffle=False)
)
def _load_binary(self, id, views, phase='train'):
root = os.path.dirname(self.data[id]['shape'])
npzfile = os.path.join(root, '{}.npz'.format(phase))
try:
with np.load(npzfile, allow_pickle=True) as f:
return f['cache']
except Exception:
cache = self._load_batch(self.data, id, views)
if data_utils.get_rank() == 0:
np.savez(npzfile, cache=cache)
return cache
def select(self, file_list):
if len(file_list[0]) == 0:
raise FileNotFoundError
return [[files[i] for i in self.views] for files in file_list]
def find_rgb(self):
try:
return self.select([sorted(glob.glob(path + '/rgb/*.*g')) for path in self.paths])
except FileNotFoundError:
try:
return self.select([sorted(glob.glob(path + '/color/*.*g')) for path in self.paths])
except FileNotFoundError:
raise FileNotFoundError("CANNOT find rendered images.")
def find_depth(self):
try:
return self.select([sorted(glob.glob(path + '/depth/*.exr')) for path in self.paths])
except FileNotFoundError:
raise FileNotFoundError("CANNOT find estimated depths images")
def find_mask(self):
try:
return self.select([sorted(glob.glob(path + '/mask/*')) for path in self.paths])
except FileNotFoundError:
raise FileNotFoundError("CANNOT find precomputed mask images")
def find_extrinsics(self):
try:
return self.select([sorted(glob.glob(path + '/extrinsic/*.txt')) for path in self.paths])
except FileNotFoundError:
try:
self.world2camera = False
return self.select([sorted(glob.glob(path + '/pose/*.txt')) for path in self.paths])
except FileNotFoundError:
raise FileNotFoundError('world2camera or camera2world matrices not found.')
def find_intrinsics_per_view(self):
try:
return self.select([sorted(glob.glob(path + '/intrinsic/*.txt')) for path in self.paths])
except FileNotFoundError:
return None
def summary_view_data(self, _data_per_view):
keys = [k for k in _data_per_view if _data_per_view[k] is not None]
num_of_objects = len(_data_per_view[keys[0]])
for k in range(num_of_objects):
            assert len(set([len(_data_per_view[key][k]) for key in keys])) == 1, "number of views must be consistent."
return [list(range(len(_data_per_view[keys[0]][k]))) for k in range(num_of_objects)]
def num_tokens(self, index):
return self.num_view
def _load_view(self, packed_data, view_idx):
image, uv, ratio = data_utils.load_rgb(
packed_data['rgb'][view_idx],
resolution=self.resolution,
bg_color=self.bg_color,
min_rgb=self.min_color)
rgb, alpha = image[:3], image[3] # C x H x W for RGB
extrinsics = data_utils.load_matrix(packed_data['ext'][view_idx])
extrinsics = geometry.parse_extrinsics(extrinsics, self.world2camera).astype('float32') # this is C2W
intrinsics = data_utils.load_intrinsics(packed_data['ixt_v'][view_idx]).astype('float32') \
if packed_data.get('ixt_v', None) is not None else None
z, mask = None, None
if packed_data.get('dep', None) is not None:
z = data_utils.load_depth(packed_data['dep'][view_idx], resolution=self.resolution)
if packed_data.get('mask', None) is not None:
mask = data_utils.load_mask(packed_data['mask'][view_idx], resolution=self.resolution)
if self.apply_mask_color: # we can also not apply mask
rgb = rgb * mask[None, :, :] + (1 - mask[None, :, :]) * np.asarray(self.bg_color)[:, None, None]
return {
'path': packed_data['rgb'][view_idx],
'view': view_idx,
'uv': uv.reshape(2, -1),
'colors': rgb.reshape(3, -1),
'alpha': alpha.reshape(-1),
'extrinsics': extrinsics,
'intrinsics': intrinsics,
'depths': z.reshape(-1) if z is not None else None,
'mask': mask.reshape(-1) if mask is not None else None,
'size': np.array([rgb.shape[1], rgb.shape[2]] + ratio, dtype=np.float32)
}
def _load_batch(self, data, index, view_ids=None):
if view_ids is None:
view_ids = [next(self.data_index[index]) for _ in range(self.num_view)]
return index, self._load_shape(data[index]), [self._load_view(data[index], view_id) for view_id in view_ids]
def __getitem__(self, index):
if self.cache is not None:
view_ids = [next(self.data_index[index]) for _ in range(self.num_view)]
return copy.deepcopy(self.cache[index % self.total_num_shape][0]), \
copy.deepcopy(self.cache[index % self.total_num_shape][1]), \
[copy.deepcopy(self.cache[index % self.total_num_shape][2][i]) for i in view_ids]
return self._load_batch(self.data, index)
def collater(self, samples):
results = super().collater(samples)
if results is None:
return results
for key in samples[0][2][0]:
if key == 'path':
results[key] = [[d[key] for d in s[2]] for s in samples]
elif samples[0][2][0][key] is not None:
results[key] = torch.from_numpy(
np.array([[d[key] for d in s[2]] for s in samples])
)
results['colors'] = results['colors'].transpose(2, 3)
if results.get('full_rgb', None) is not None:
results['full_rgb'] = results['full_rgb'].transpose(2, 3)
return results
class ShapeViewStreamDataset(BaseWrapperDataset):
"""
    Different from ShapeViewDataset, this merges all views into a single
    stream of (shape, view) items regardless of which shape they come from.
    ** HACK **: an alternative to the ShapeViewDataset
"""
def __init__(self, dataset):
super().__init__(dataset)
        # the stream dataset assumes no repetition and a single view per item
        assert self.dataset.repeat == 1
        assert self.dataset.num_view == 1
self.total_num_shape = dataset.total_num_shape
# reset the data_index
self.dataset.data_index = []
for i, d in enumerate(self.data):
for j, _ in enumerate(d['rgb']):
self.dataset.data_index.append((i, j)) # shape i, view j
def __len__(self):
return len(self.dataset.data_index)
def ordered_indices(self):
return np.arange(len(self))
@property
def cache(self):
return self.dataset.cache
@property
def data(self):
return self.dataset.data
def _load_batch(self, data, shape_id, view_ids):
return shape_id, self.dataset._load_shape(data[shape_id]), [self.dataset._load_view(data[shape_id], view_id) for view_id in view_ids]
def __getitem__(self, index):
shape_id, view_id = self.dataset.data_index[index]
if self.cache is not None:
return copy.deepcopy(self.cache[shape_id % self.total_num_shape][0]), \
copy.deepcopy(self.cache[shape_id % self.total_num_shape][1]), \
[copy.deepcopy(self.cache[shape_id % self.total_num_shape][2][view_id])]
return self._load_batch(self.data, shape_id, [view_id])
def _load_binary(self, id, views, phase='train'):
root = os.path.dirname(self.data[id]['ixt'])
npzfile = os.path.join(root, '{}.npz'.format(phase))
try:
with np.load(npzfile, allow_pickle=True) as f:
return f['cache']
except Exception:
caches = [self._load_batch(self.data, id, view_id) for view_id in views]
cache = [caches[0][0], caches[0][1], [caches[i][2][0] for i in range(len(views))]]
if data_utils.get_rank() == 0:
np.savez(npzfile, cache=cache)
return cache
class SampledPixelDataset(BaseWrapperDataset):
"""
    A wrapper dataset which splits rendered images into sampled pixels.
"""
def __init__(self,
dataset,
num_sample=None,
sampling_on_mask=1.0,
sampling_on_bbox=False,
sampling_at_center=1.0,
resolution=512,
patch_size=1):
super().__init__(dataset)
self.num_sample = num_sample
self.sampling_on_mask = sampling_on_mask
self.sampling_on_bbox = sampling_on_bbox
self.sampling_at_center = sampling_at_center
self.patch_size = patch_size
self.res = resolution
def __getitem__(self, index):
index, data_per_shape, data_per_view = self.dataset[index]
# sample pixels from the original images
sample_index = [
data_utils.sample_pixel_from_image(
data['alpha'].shape[-1],
self.num_sample,
data.get('mask', None)
if data.get('mask', None) is not None
else data.get('alpha', None),
self.sampling_on_mask,
self.sampling_on_bbox,
self.sampling_at_center,
width=int(data['size'][1]),
patch_size=self.patch_size)
for data in data_per_view
]
for i, data in enumerate(data_per_view):
data_per_view[i]['full_rgb'] = copy.deepcopy(data['colors'])
for key in data:
if data[key] is not None \
and (key != 'extrinsics' and key != 'view' and key != 'full_rgb') \
and data[key].shape[-1] > self.num_sample:
if len(data[key].shape) == 2:
data_per_view[i][key] = data[key][:, sample_index[i]]
else:
data_per_view[i][key] = data[key][sample_index[i]]
data_per_view[i]['index'] = sample_index[i]
return index, data_per_shape, data_per_view
def num_tokens(self, index):
return self.dataset.num_view * self.num_sample
class WorldCoordDataset(BaseWrapperDataset):
"""
    A wrapper dataset that transforms UV-space pixels into world-space rays.
"""
def __getitem__(self, index):
index, data_per_shape, data_per_view = self.dataset[index]
def camera2world(data):
inv_RT = data['extrinsics']
intrinsics = data_per_shape['intrinsics']
# get camera center (XYZ)
ray_start = inv_RT[:3, 3]
# get points at a random depth (=1)
ray_dir = geometry.get_ray_direction(
ray_start, data['uv'], intrinsics, inv_RT, 1
)
            # here we still keep the original data for tracking purposes
data.update({
'ray_start': ray_start,
'ray_dir': ray_dir,
})
return data
return index, data_per_shape, [camera2world(data) for data in data_per_view]
def collater(self, samples):
results = self.dataset.collater(samples)
if results is None:
return results
results['ray_start'] = results['ray_start'].unsqueeze(-2)
results['ray_dir'] = results['ray_dir'].transpose(2, 3)
results['colors'] = results['colors'].transpose(2, 3)
if results.get('full_rgb', None) is not None:
results['full_rgb'] = results['full_rgb'].transpose(2, 3)
return results
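# Illustrative sketch (added for clarity; not part of the original NSVF code):
# a hedged example of how these wrapper datasets are usually stacked for
# ray-based training; the data root and the argument values below are
# placeholders, not the project defaults.
def _example_build_pixel_dataset(data_root):
    base = ShapeViewDataset(
        data_root, views=list(range(100)), num_view=1,
        resolution=400, train=True, preload=False)
    sampled = SampledPixelDataset(base, num_sample=2048, resolution=400)
    return WorldCoordDataset(sampled)  # yields world-space rays plus target colors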
class InfiniteDataset(BaseWrapperDataset):
"""
    A wrapper dataset which supports infinite sampling from a dataset.
No epochs in this case.
"""
def __init__(self, dataset, max_len=1000000):
super().__init__(dataset)
self.MAXLEN = max_len
def __len__(self):
return self.MAXLEN
def ordered_indices(self):
return np.arange(self.MAXLEN)
def __getitem__(self, index):
actual_length = len(self.dataset)
return self.dataset[index % actual_length] | 20,801 | 36.821818 | 141 | py |
NSVF | NSVF-main/fairnr/data/geometry.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn.functional as F
from fairnr.data import data_utils as D
try:
from fairnr.clib._ext import build_octree
except ImportError:
pass
INF = 1000.0
def ones_like(x):
T = torch if isinstance(x, torch.Tensor) else np
return T.ones_like(x)
def stack(x):
T = torch if isinstance(x[0], torch.Tensor) else np
return T.stack(x)
def matmul(x, y):
T = torch if isinstance(x, torch.Tensor) else np
return T.matmul(x, y)
def cross(x, y, axis=0):
T = torch if isinstance(x, torch.Tensor) else np
return T.cross(x, y, axis)
def cat(x, axis=1):
if isinstance(x[0], torch.Tensor):
return torch.cat(x, dim=axis)
return np.concatenate(x, axis=axis)
def normalize(x, axis=-1, order=2):
if isinstance(x, torch.Tensor):
l2 = x.norm(p=order, dim=axis, keepdim=True)
return x / (l2 + 1e-8), l2
else:
l2 = np.linalg.norm(x, order, axis)
l2 = np.expand_dims(l2, axis)
l2[l2==0] = 1
return x / l2, l2
def parse_extrinsics(extrinsics, world2camera=True):
""" this function is only for numpy for now"""
if extrinsics.shape[0] == 3 and extrinsics.shape[1] == 4:
extrinsics = np.vstack([extrinsics, np.array([[0, 0, 0, 1.0]])])
if extrinsics.shape[0] == 1 and extrinsics.shape[1] == 16:
extrinsics = extrinsics.reshape(4, 4)
if world2camera:
extrinsics = np.linalg.inv(extrinsics).astype(np.float32)
return extrinsics
def parse_intrinsics(intrinsics):
fx = intrinsics[0, 0]
fy = intrinsics[1, 1]
cx = intrinsics[0, 2]
cy = intrinsics[1, 2]
return fx, fy, cx, cy
def uv2cam(uv, z, intrinsics, homogeneous=False):
fx, fy, cx, cy = parse_intrinsics(intrinsics)
x_lift = (uv[0] - cx) / fx * z
y_lift = (uv[1] - cy) / fy * z
z_lift = ones_like(x_lift) * z
if homogeneous:
return stack([x_lift, y_lift, z_lift, ones_like(z_lift)])
else:
return stack([x_lift, y_lift, z_lift])
def cam2world(xyz_cam, inv_RT):
return matmul(inv_RT, xyz_cam)[:3]
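# Illustrative sketch (added for clarity; not part of the original NSVF code):
# lift a pixel (u, v) at depth z into camera coordinates and then into world
# coordinates using the camera-to-world matrix inv_RT; the intrinsics below
# are placeholders.
def _example_unproject_pixel():
    intrinsics = np.array([[500., 0., 200., 0.],
                           [0., 500., 200., 0.],
                           [0., 0., 1., 0.],
                           [0., 0., 0., 1.]], dtype=np.float32)
    inv_RT = np.eye(4, dtype=np.float32)  # camera placed at the world origin
    uv = np.array([[200.], [200.]], dtype=np.float32)  # the principal point
    xyz_cam = uv2cam(uv, 2.0, intrinsics, homogeneous=True)  # (4, 1)
    xyz_world = cam2world(xyz_cam, inv_RT)  # (3, 1)
    assert np.allclose(xyz_world[:, 0], [0., 0., 2.])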
def r6d2mat(d6: torch.Tensor) -> torch.Tensor:
"""
Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
using Gram--Schmidt orthogonalisation per Section B of [1].
Args:
d6: 6D rotation representation, of size (*, 6)
Returns:
batch of rotation matrices of size (*, 3, 3)
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
a1, a2 = d6[..., :3], d6[..., 3:]
b1 = F.normalize(a1, dim=-1)
b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1
b2 = F.normalize(b2, dim=-1)
b3 = torch.cross(b1, b2, dim=-1)
return torch.stack((b1, b2, b3), dim=-2)
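# Illustrative sketch (added for clarity; not part of the original NSVF code):
# any 6D input whose two halves are linearly independent yields a proper
# rotation matrix (orthonormal, determinant +1).
def _example_r6d2mat():
    d6 = torch.tensor([[1.0, 0.1, 0.0, 0.0, 1.0, 0.2]])
    R = r6d2mat(d6)  # (1, 3, 3)
    eye = torch.eye(3).expand_as(R)
    assert torch.allclose(R @ R.transpose(-1, -2), eye, atol=1e-5)
    assert torch.allclose(torch.det(R), torch.ones(1), atol=1e-5)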
def get_ray_direction(ray_start, uv, intrinsics, inv_RT, depths=None):
if depths is None:
depths = 1
rt_cam = uv2cam(uv, depths, intrinsics, True)
rt = cam2world(rt_cam, inv_RT)
ray_dir, _ = normalize(rt - ray_start[:, None], axis=0)
return ray_dir
def look_at_rotation(camera_position, at=None, up=None, inverse=False, cv=False):
"""
This function takes a vector 'camera_position' which specifies the location
of the camera in world coordinates and two vectors `at` and `up` which
    indicate the position of the object and the up direction of the world
coordinate system respectively. The object is assumed to be centered at
the origin.
The output is a rotation matrix representing the transformation
from world coordinates -> view coordinates.
Input:
camera_position: 3
at: 1 x 3 or N x 3 (0, 0, 0) in default
up: 1 x 3 or N x 3 (0, 1, 0) in default
"""
if at is None:
at = torch.zeros_like(camera_position)
else:
at = torch.tensor(at).type_as(camera_position)
if up is None:
up = torch.zeros_like(camera_position)
up[2] = -1
else:
up = torch.tensor(up).type_as(camera_position)
z_axis = normalize(at - camera_position)[0]
x_axis = normalize(cross(up, z_axis))[0]
y_axis = normalize(cross(z_axis, x_axis))[0]
R = cat([x_axis[:, None], y_axis[:, None], z_axis[:, None]], axis=1)
return R
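# Illustrative sketch (added for clarity; not part of the original NSVF code):
# a camera at (4, 0, 0) with the default at/up settings looks back at the
# origin; the columns of R are the camera x/y/z axes, so the z column points
# from the camera towards the object.
def _example_look_at_rotation():
    R = look_at_rotation(torch.tensor([4.0, 0.0, 0.0]))  # (3, 3)
    assert torch.allclose(R[:, 2], torch.tensor([-1.0, 0.0, 0.0]), atol=1e-5)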
def ray(ray_start, ray_dir, depths):
return ray_start + ray_dir * depths
def compute_normal_map(ray_start, ray_dir, depths, RT, width=512, proj=False):
# TODO:
    # this function is pytorch-only (for now)
wld_coords = ray(ray_start, ray_dir, depths.unsqueeze(-1)).transpose(0, 1)
cam_coords = matmul(RT[:3, :3], wld_coords) + RT[:3, 3].unsqueeze(-1)
cam_coords = D.unflatten_img(cam_coords, width)
# estimate local normal
shift_l = cam_coords[:, 2:, :]
shift_r = cam_coords[:, :-2, :]
shift_u = cam_coords[:, :, 2: ]
shift_d = cam_coords[:, :, :-2]
diff_hor = normalize(shift_r - shift_l, axis=0)[0][:, :, 1:-1]
diff_ver = normalize(shift_u - shift_d, axis=0)[0][:, 1:-1, :]
normal = cross(diff_hor, diff_ver)
_normal = normal.new_zeros(*cam_coords.size())
_normal[:, 1:-1, 1:-1] = normal
_normal = _normal.reshape(3, -1).transpose(0, 1)
# compute the projected color
if proj:
_normal = normalize(_normal, axis=1)[0]
wld_coords0 = ray(ray_start, ray_dir, 0).transpose(0, 1)
cam_coords0 = matmul(RT[:3, :3], wld_coords0) + RT[:3, 3].unsqueeze(-1)
cam_coords0 = D.unflatten_img(cam_coords0, width)
cam_raydir = normalize(cam_coords - cam_coords0, 0)[0].reshape(3, -1).transpose(0, 1)
proj_factor = (_normal * cam_raydir).sum(-1).abs() * 0.8 + 0.2
return proj_factor
return _normal
def trilinear_interp(p, q, point_feats):
weights = (p * q + (1 - p) * (1 - q)).prod(dim=-1, keepdim=True)
if point_feats.dim() == 2:
point_feats = point_feats.view(point_feats.size(0), 8, -1)
point_feats = (weights * point_feats).sum(1)
return point_feats
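# Editor's note on trilinear_interp above: `q` holds the 8 cube-corner coordinates
# in {0, 1}^3 and `p` the query point's fractional position inside the cube, so the
# weights are the standard trilinear coefficients (non-negative and summing to one
# when p lies in [0, 1]^3).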
# helper functions for encoder
def padding_points(xs, pad):
if len(xs) == 1:
return xs[0].unsqueeze(0)
maxlen = max([x.size(0) for x in xs])
xt = xs[0].new_ones(len(xs), maxlen, xs[0].size(1)).fill_(pad)
for i in range(len(xs)):
xt[i, :xs[i].size(0)] = xs[i]
return xt
def pruning_points(feats, points, scores, depth=0, th=0.5):
if depth > 0:
g = int(8 ** depth)
scores = scores.reshape(scores.size(0), -1, g).sum(-1, keepdim=True)
scores = scores.expand(*scores.size()[:2], g).reshape(scores.size(0), -1)
alpha = (1 - torch.exp(-scores)) > th
feats = [feats[i][alpha[i]] for i in range(alpha.size(0))]
points = [points[i][alpha[i]] for i in range(alpha.size(0))]
points = padding_points(points, INF)
feats = padding_points(feats, 0)
return feats, points
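# Editor's note: in pruning_points, `1 - exp(-scores)` maps the accumulated
# non-negative density scores to an occupancy-like value in [0, 1); voxels whose
# value does not exceed `th` are dropped and the survivors are re-padded.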
def offset_points(point_xyz, quarter_voxel=1, offset_only=False, bits=2):
c = torch.arange(1, 2 * bits, 2, device=point_xyz.device)
ox, oy, oz = torch.meshgrid([c, c, c])
offset = (torch.cat([
ox.reshape(-1, 1),
oy.reshape(-1, 1),
oz.reshape(-1, 1)], 1).type_as(point_xyz) - bits) / float(bits - 1)
if not offset_only:
return point_xyz.unsqueeze(1) + offset.unsqueeze(0).type_as(point_xyz) * quarter_voxel
return offset.type_as(point_xyz) * quarter_voxel
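# Editor's note: with the default bits=2, offset_points returns for every input
# point the 8 cube-corner offsets at +/- quarter_voxel along each axis; a larger
# `bits` produces a denser bits^3 grid spanning the same cube, and offset_only=True
# returns just the offsets without adding them to the points.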
def discretize_points(voxel_points, voxel_size):
    # this function turns voxel centers/corners into integer indices
    # we assume all points are already given as voxels (real numbers)
minimal_voxel_point = voxel_points.min(dim=0, keepdim=True)[0]
voxel_indices = ((voxel_points - minimal_voxel_point) / voxel_size).round_().long() # float
residual = (voxel_points - voxel_indices.type_as(voxel_points) * voxel_size).mean(0, keepdim=True)
return voxel_indices, residual
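# Example (editor's sketch): for voxel_points = [[0., 0., 0.], [0.2, 0., 0.]] and
# voxel_size = 0.1, discretize_points returns integer indices [[0, 0, 0], [2, 0, 0]]
# together with the sub-voxel residual needed to map the indices back to the
# original (real-valued) coordinates.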
def splitting_points(point_xyz, point_feats, values, half_voxel):
# generate new centers
quarter_voxel = half_voxel * .5
new_points = offset_points(point_xyz, quarter_voxel).reshape(-1, 3)
old_coords = discretize_points(point_xyz, quarter_voxel)[0]
new_coords = offset_points(old_coords).reshape(-1, 3)
new_keys0 = offset_points(new_coords).reshape(-1, 3)
# get unique keys and inverse indices (for original key0, where it maps to in keys)
new_keys, new_feats = torch.unique(new_keys0, dim=0, sorted=True, return_inverse=True)
new_keys_idx = new_feats.new_zeros(new_keys.size(0)).scatter_(
0, new_feats, torch.arange(new_keys0.size(0), device=new_feats.device) // 64)
# recompute key vectors using trilinear interpolation
new_feats = new_feats.reshape(-1, 8)
if values is not None:
p = (new_keys - old_coords[new_keys_idx]).type_as(point_xyz).unsqueeze(1) * .25 + 0.5 # (1/4 voxel size)
q = offset_points(p, .5, offset_only=True).unsqueeze(0) + 0.5 # BUG?
point_feats = point_feats[new_keys_idx]
point_feats = F.embedding(point_feats, values).view(point_feats.size(0), -1)
new_values = trilinear_interp(p, q, point_feats)
else:
new_values = None
return new_points, new_feats, new_values, new_keys
def expand_points(voxel_points, voxel_size):
_voxel_size = min([
torch.sqrt(((voxel_points[j:j+1] - voxel_points[j+1:]) ** 2).sum(-1).min())
for j in range(100)])
depth = int(np.round(torch.log2(_voxel_size / voxel_size)))
if depth > 0:
half_voxel = _voxel_size / 2.0
for _ in range(depth):
voxel_points = offset_points(voxel_points, half_voxel / 2.0).reshape(-1, 3)
half_voxel = half_voxel / 2.0
return voxel_points, depth
def get_edge(depth_pts, voxel_pts, voxel_size, th=0.05):
voxel_pts = offset_points(voxel_pts, voxel_size / 2.0)
diff_pts = (voxel_pts - depth_pts[:, None, :]).norm(dim=2)
ab = diff_pts.sort(dim=1)[0][:, :2]
a, b = ab[:, 0], ab[:, 1]
c = voxel_size
p = (ab.sum(-1) + c) / 2.0
h = (p * (p - a) * (p - b) * (p - c)) ** 0.5 / c
return h < (th * voxel_size)
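# Editor's note: in get_edge, `a` and `b` are the distances from each depth point
# to its two nearest voxel corners and `c` is the voxel size (the two nearest
# corners are assumed adjacent), so `(p * (p - a) * (p - b) * (p - c)) ** 0.5` is
# the triangle area by Heron's formula with semi-perimeter `p`, and h = area / c is
# half the point's height above the corner-to-corner edge; small h means the point
# lies close to a voxel edge.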
# fill-in image
def fill_in(shape, hits, input, initial=1.0):
input_sizes = [k for k in input.size()]
if (len(input_sizes) == len(shape)) and \
all([shape[i] == input_sizes[i] for i in range(len(shape))]):
return input # shape is the same no need to fill
if isinstance(initial, torch.Tensor):
output = initial.expand(*shape)
else:
output = input.new_ones(*shape) * initial
if input is not None:
if len(shape) == 1:
return output.masked_scatter(hits, input)
return output.masked_scatter(hits.unsqueeze(-1).expand(*shape), input)
return output
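# Editor's note: fill_in scatters per-ray values for the rays that hit the scene
# (`hits` mask) back into a dense buffer of size `shape`, filling missed rays with
# `initial` (e.g. the background color); if the input already has the full shape it
# is returned unchanged.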
def build_easy_octree(points, half_voxel):
coords, residual = discretize_points(points, half_voxel)
ranges = coords.max(0)[0] - coords.min(0)[0]
depths = torch.log2(ranges.max().float()).ceil_().long() - 1
center = (coords.max(0)[0] + coords.min(0)[0]) / 2
centers, children = build_octree(center, coords, int(depths))
centers = centers.float() * half_voxel + residual # transform back to float
return centers, children
def cartesian_to_spherical(xyz):
""" xyz: batch x 3
"""
r = xyz.norm(p=2, dim=-1)
theta = torch.atan2(xyz[:, :2].norm(p=2, dim=-1), xyz[:, 2])
phi = torch.atan2(xyz[:, 1], xyz[:, 0])
return torch.stack((r, theta, phi), 1)
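# Editor's note: `theta` is the polar angle measured from the +z axis and `phi` the
# azimuth in the x-y plane, so spherical_to_cartesian below inverts this mapping
# (up to the usual 2*pi ambiguity in phi).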
def spherical_to_cartesian(rtp):
x = rtp[:, 0] * torch.sin(rtp[:, 1]) * torch.cos(rtp[:, 2])
y = rtp[:, 0] * torch.sin(rtp[:, 1]) * torch.sin(rtp[:, 2])
z = rtp[:, 0] * torch.cos(rtp[:, 1])
return torch.stack((x, y, z), 1) | 11,984 | 33.941691 | 112 | py |
NSVF | NSVF-main/fairnr/data/trajectory.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
TRAJECTORY_REGISTRY = {}
def register_traj(name):
def register_traj_fn(fn):
if name in TRAJECTORY_REGISTRY:
raise ValueError('Cannot register duplicate trajectory ({})'.format(name))
TRAJECTORY_REGISTRY[name] = fn
return fn
return register_traj_fn
def get_trajectory(name):
return TRAJECTORY_REGISTRY.get(name, None)
@register_traj('circle')
def circle(radius=3.5, h=0.0, axis='z', t0=0, r=1):
if axis == 'z':
return lambda t: [radius * np.cos(r * t+t0), radius * np.sin(r * t+t0), h]
elif axis == 'y':
return lambda t: [radius * np.cos(r * t+t0), h, radius * np.sin(r * t+t0)]
else:
return lambda t: [h, radius * np.cos(r * t+t0), radius * np.sin(r * t+t0)]
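# Editor's example sketch (not part of the original file): trajectories are looked
# up by name and return a callable mapping an angle t (in radians) to a position.
def _circle_trajectory_example():
    pose_fn = get_trajectory('circle')(radius=2.0, h=0.5)
    return pose_fn(0.0)  # -> [2.0, 0.0, 0.5]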
@register_traj('zoomin_circle')
def zoomin_circle(radius=3.5, h=0.0, axis='z', t0=0, r=1):
ra = lambda t: 0.1 + abs(4.0 - t * 2 / np.pi)
if axis == 'z':
return lambda t: [radius * ra(t) * np.cos(r * t+t0), radius * ra(t) * np.sin(r * t+t0), h]
elif axis == 'y':
return lambda t: [radius * ra(t) * np.cos(r * t+t0), h, radius * ra(t) * np.sin(r * t+t0)]
else:
return lambda t: [h, radius * (4.2 - t * 2 / np.pi) * np.cos(r * t+t0), radius * (4.2 - t * 2 / np.pi) * np.sin(r * t+t0)]
@register_traj('zoomin_line')
def zoomin_line(radius=3.5, h=0.0, axis='z', t0=0, r=1, min_r=0.0001, max_r=10, step_r=10):
ra = lambda t: min_r + (max_r - min_r) * t * 180 / np.pi / step_r
if axis == 'z':
return lambda t: [radius * ra(t) * np.cos(t0), radius * ra(t) * np.sin(t0), h * ra(t)]
elif axis == 'y':
return lambda t: [radius * ra(t) * np.cos(t0), h, radius * ra(t) * np.sin(t0)]
else:
return lambda t: [h, radius * (4.2 - t * 2 / np.pi) * np.cos(r * t+t0), radius * (4.2 - t * 2 / np.pi) * np.sin(r * t+t0)]
| 2,045 | 34.894737 | 130 | py |
NSVF | NSVF-main/fairnr/tasks/neural_rendering.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os, copy
import json
import torch
import imageio
import numpy as np
from collections import defaultdict
from torchvision.utils import save_image
from argparse import Namespace
from fairseq.tasks import FairseqTask, register_task
from fairseq.optim.fp16_optimizer import FP16Optimizer
from fairseq.logging import progress_bar
from fairnr.data import (
ShapeViewDataset, SampledPixelDataset, ShapeViewStreamDataset,
WorldCoordDataset, ShapeDataset, InfiniteDataset
)
from fairnr.data.data_utils import write_images, recover_image, parse_views
from fairnr.data.geometry import ray, compute_normal_map
from fairnr.renderer import NeuralRenderer
from fairnr.data.trajectory import get_trajectory
from fairnr import ResetTrainerException
@register_task("single_object_rendering")
class SingleObjRenderingTask(FairseqTask):
"""
Task for remembering & rendering a single object.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser"""
parser.add_argument("data", help='data-path or data-directoy')
parser.add_argument("--object-id-path", type=str, help='path to object indices', default=None)
parser.add_argument("--no-preload", action="store_true")
parser.add_argument("--no-load-binary", action="store_true")
parser.add_argument("--load-depth", action="store_true",
help="load depth images if exists")
parser.add_argument("--transparent-background", type=str, default="1.0",
help="background color if the image is transparent")
parser.add_argument("--load-mask", action="store_true",
help="load pre-computed masks which is useful for subsampling during training.")
parser.add_argument("--train-views", type=str, default="0..50",
help="views sampled for training, you can set specific view id, or a range")
parser.add_argument("--valid-views", type=str, default="0..50",
help="views sampled for validation, you can set specific view id, or a range")
parser.add_argument("--test-views", type=str, default="0",
help="views sampled for rendering, only used for showing rendering results.")
parser.add_argument("--subsample-valid", type=int, default=-1,
help="if set > -1, subsample the validation (when training set is too large)")
parser.add_argument("--view-per-batch", type=int, default=6,
help="number of views training each batch (each GPU)")
parser.add_argument("--valid-view-per-batch", type=int, default=1,
help="number of views training each batch (each GPU)")
parser.add_argument("--view-resolution", type=str, default='64x64',
help="width for the squared image. downsampled from the original.")
parser.add_argument('--valid-view-resolution', type=str, default=None,
help="if not set, if valid view resolution will be train view resolution")
parser.add_argument("--min-color", choices=(0, -1), default=-1, type=int,
help="RGB range used in the model. conventionally used -1 ~ 1")
parser.add_argument("--virtual-epoch-steps", type=int, default=None,
help="virtual epoch used in Infinite Dataset. if None, set max-update")
parser.add_argument("--pruning-every-steps", type=int, default=None,
help="if the model supports pruning, prune unecessary voxels")
parser.add_argument("--half-voxel-size-at", type=str, default=None,
help='specific detailed number of updates to half the voxel sizes')
parser.add_argument("--reduce-step-size-at", type=str, default=None,
help='specific detailed number of updates to reduce the raymarching step sizes')
parser.add_argument("--prune-voxel-at", type=str, default=None,
help='specific detailed number of pruning voxels')
parser.add_argument("--rendering-every-steps", type=int, default=None,
help="if set, enables rendering online with default parameters")
parser.add_argument("--rendering-args", type=str, metavar='JSON')
parser.add_argument("--pruning-th", type=float, default=0.5,
help="if larger than this, we choose keep the voxel.")
parser.add_argument("--pruning-with-train-stats", action='store_true',
help="if set, model will run over the training set statstics to prune voxels.")
parser.add_argument("--pruning-rerun-train-set", action='store_true',
help="only works when --pruning-with-train-stats is also set.")
parser.add_argument("--output-valid", type=str, default=None)
def __init__(self, args):
super().__init__(args)
self._trainer, self._dummy_batch = None, None
# check dataset
self.train_data = self.val_data = self.test_data = args.data
self.object_ids = None if args.object_id_path is None else \
{line.strip(): i for i, line in enumerate(open(args.object_id_path))}
self.output_valid = getattr(args, "output_valid", None)
if os.path.isdir(args.data):
if os.path.exists(args.data + '/train.txt'):
self.train_data = args.data + '/train.txt'
if os.path.exists(args.data + '/val.txt'):
self.val_data = args.data + '/val.txt'
if os.path.exists(args.data + '/test.txt'):
self.test_data = args.data + '/test.txt'
if self.object_ids is None and os.path.exists(args.data + '/object_ids.txt'):
self.object_ids = {line.strip(): i for i, line in enumerate(open(args.data + '/object_ids.txt'))}
if self.object_ids is not None:
self.ids_object = {self.object_ids[o]: o for o in self.object_ids}
else:
self.ids_object = {0: 'model'}
if len(self.args.tensorboard_logdir) > 0 and getattr(args, "distributed_rank", -1) == 0:
from tensorboardX import SummaryWriter
self.writer = SummaryWriter(self.args.tensorboard_logdir + '/images')
else:
self.writer = None
self._num_updates = {'pv': -1, 'sv': -1, 'rs': -1, 're': -1}
self.pruning_every_steps = getattr(self.args, "pruning_every_steps", None)
self.pruning_th = getattr(self.args, "pruning_th", 0.5)
self.rendering_every_steps = getattr(self.args, "rendering_every_steps", None)
self.steps_to_half_voxels = getattr(self.args, "half_voxel_size_at", None)
self.steps_to_reduce_step = getattr(self.args, "reduce_step_size_at", None)
self.steps_to_prune_voxels = getattr(self.args, "prune_voxel_at", None)
if self.steps_to_half_voxels is not None:
self.steps_to_half_voxels = [int(s) for s in self.steps_to_half_voxels.split(',')]
if self.steps_to_reduce_step is not None:
self.steps_to_reduce_step = [int(s) for s in self.steps_to_reduce_step.split(',')]
if self.steps_to_prune_voxels is not None:
self.steps_to_prune_voxels = [int(s) for s in self.steps_to_prune_voxels.split(',')]
if self.rendering_every_steps is not None:
gen_args = {
'path': args.save_dir,
'render_beam': 1, 'render_resolution': '512x512',
'render_num_frames': 120, 'render_angular_speed': 3,
'render_output_types': ["rgb"], 'render_raymarching_steps': 10,
'render_at_vector': "(0,0,0)", 'render_up_vector': "(0,0,-1)",
'render_path_args': "{'radius': 1.5, 'h': 0.5}",
'render_path_style': 'circle', "render_output": None
}
gen_args.update(json.loads(getattr(args, 'rendering_args', '{}') or '{}'))
self.renderer = self.build_generator(Namespace(**gen_args))
else:
self.renderer = None
self.train_views = parse_views(args.train_views)
self.valid_views = parse_views(args.valid_views)
self.test_views = parse_views(args.test_views)
@classmethod
def setup_task(cls, args, **kwargs):
"""
Setup the task
"""
return cls(args)
def repeat_dataset(self, split):
return 1 if split != 'train' else self.args.distributed_world_size # IMPORTANT!
def load_dataset(self, split, **kwargs):
"""
Load a given dataset split (train, valid, test)
"""
self.datasets[split] = ShapeViewDataset(
self.train_data if split == 'train' else \
self.val_data if split == 'valid' else self.test_data,
views=self.train_views if split == 'train' else \
self.valid_views if split == 'valid' else self.test_views,
num_view=self.args.view_per_batch if split == 'train' else \
self.args.valid_view_per_batch if split == 'valid' else 1,
resolution=self.args.view_resolution if split == 'train' else \
getattr(self.args, "valid_view_resolution", self.args.view_resolution) if split == 'valid' else \
getattr(self.args, "render_resolution", self.args.view_resolution),
subsample_valid=self.args.subsample_valid if split == 'valid' else -1,
train=(split=='train'),
load_depth=self.args.load_depth and (split!='test'),
load_mask=self.args.load_mask and (split!='test'),
repeat=self.repeat_dataset(split),
preload=(not getattr(self.args, "no_preload", False)) and (split!='test'),
binarize=(not getattr(self.args, "no_load_binary", False)) and (split!='test'),
bg_color=getattr(self.args, "transparent_background", "1,1,1"),
min_color=getattr(self.args, "min_color", -1),
ids=self.object_ids
)
if split == 'train':
max_step = getattr(self.args, "virtual_epoch_steps", None)
if max_step is not None:
total_num_models = max_step * self.args.distributed_world_size * self.args.max_sentences
else:
total_num_models = 10000000
if getattr(self.args, "pruning_rerun_train_set", False):
self._unique_trainset = ShapeViewStreamDataset(copy.deepcopy(self.datasets[split])) # backup
self._unique_trainitr = self.get_batch_iterator(
self._unique_trainset, max_sentences=self.args.max_sentences_valid, seed=self.args.seed,
num_shards=self.args.distributed_world_size, shard_id=self.args.distributed_rank,
num_workers=self.args.num_workers)
self.datasets[split] = InfiniteDataset(self.datasets[split], total_num_models)
if split == 'valid':
self.datasets[split] = ShapeViewStreamDataset(self.datasets[split])
def build_generator(self, args):
"""
build a neural renderer for visualization
"""
return NeuralRenderer(
beam=args.render_beam,
resolution=args.render_resolution,
frames=args.render_num_frames,
speed=args.render_angular_speed,
raymarching_steps=args.render_raymarching_steps,
path_gen=get_trajectory(args.render_path_style)(
**eval(args.render_path_args)
),
at=eval(args.render_at_vector),
up=eval(args.render_up_vector),
fps=getattr(args, "render_save_fps", 24),
output_dir=args.render_output if args.render_output is not None
else os.path.join(args.path, "output"),
output_type=args.render_output_types,
test_camera_poses=getattr(args, "render_camera_poses", None),
test_camera_intrinsics=getattr(args, "render_camera_intrinsics", None),
test_camera_views=getattr(args, "render_views", None)
)
def setup_trainer(self, trainer):
# give the task ability to access the global trainer functions
self._trainer = trainer
@property
def source_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return None
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return None
def update_step(self, num_updates, name='re'):
"""Task level update when number of updates increases.
This is called after the optimization step and learning rate
update at each iteration.
"""
self._num_updates[name] = num_updates
def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
if (((self.pruning_every_steps is not None) and \
(update_num % self.pruning_every_steps == 0) and \
(update_num > 0)) or \
((self.steps_to_prune_voxels is not None) and \
update_num in self.steps_to_prune_voxels) \
) and \
(update_num > self._num_updates['pv']) and \
hasattr(model, 'prune_voxels'):
model.eval()
if getattr(self.args, "pruning_rerun_train_set", False):
with torch.no_grad():
model.clean_caches(reset=True)
progress = progress_bar.progress_bar(
self._unique_trainitr.next_epoch_itr(shuffle=False),
prefix=f"pruning based statiscs over training set",
tensorboard_logdir=None,
default_log_format=self.args.log_format if self.args.log_format is not None else "tqdm")
for step, inner_sample in enumerate(progress):
outs = model(**self._trainer._prepare_sample(self.filter_dummy(inner_sample)))
progress.log(stats=outs['other_logs'], tag='track', step=step)
model.prune_voxels(self.pruning_th, train_stats=getattr(self.args, "pruning_with_train_stats", False))
self.update_step(update_num, 'pv')
if self.steps_to_half_voxels is not None and \
(update_num in self.steps_to_half_voxels) and \
(update_num > self._num_updates['sv']):
model.split_voxels()
self.update_step(update_num, 'sv')
raise ResetTrainerException
if self.rendering_every_steps is not None and \
(update_num % self.rendering_every_steps == 0) and \
(update_num > 0) and \
self.renderer is not None and \
(update_num > self._num_updates['re']):
sample_clone = {key: sample[key].clone() if sample[key] is not None else None for key in sample }
outputs = self.inference_step(self.renderer, [model], [sample_clone, 0])[1]
if getattr(self.args, "distributed_rank", -1) == 0: # save only for master
self.renderer.save_images(outputs, update_num)
self.steps_to_half_voxels = [a for a in self.steps_to_half_voxels if a != update_num]
if self.steps_to_reduce_step is not None and \
update_num in self.steps_to_reduce_step and \
(update_num > self._num_updates['rs']):
model.reduce_stepsize()
self.update_step(update_num, 'rs')
self.update_step(update_num, 'step')
return super().train_step(sample, model, criterion, optimizer, update_num, ignore_grad)
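    # Editor's note: train_step above drives the progressive training schedule:
    # it periodically prunes empty voxels (optionally re-running the training set
    # to collect statistics), splits voxels at the requested steps (raising
    # ResetTrainerException so the trainer rebuilds its optimizer state), reduces
    # the ray-marching step size, and optionally renders preview videos.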
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
model.add_eval_scores(logging_output, sample, model.cache, criterion, outdir=self.output_valid)
if self.writer is not None:
images = model.visualize(sample, shape=0, view=0)
if images is not None:
write_images(self.writer, images, self._num_updates['step'])
return loss, sample_size, logging_output
def save_image(self, img, id, view, group='gt'):
object_name = self.ids_object[id.item()]
def _mkdir(x):
if not os.path.exists(x):
os.mkdir(x)
_mkdir(self.output_valid)
_mkdir(os.path.join(self.output_valid, group))
_mkdir(os.path.join(self.output_valid, group, object_name))
imageio.imsave(os.path.join(
self.output_valid, group, object_name,
'{:04d}.png'.format(view)),
(img * 255).astype(np.uint8))
def filter_dummy(self, sample):
if self._dummy_batch is None:
self._dummy_batch = sample
if sample is None:
sample = self._dummy_batch
return sample
| 17,291 | 50.159763 | 114 | py |
NSVF | NSVF-main/fairnr_cli/render.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This is a simplified copy of fairseq-generate for other usage.
"""
import logging
import math
import os
import sys
import time
import torch
import imageio
import numpy as np
from fairseq import checkpoint_utils, progress_bar, tasks, utils
from fairseq.meters import StopwatchMeter, TimeMeter
from fairnr import options
def main(args):
assert args.path is not None, '--path required for generation!'
if args.results_path is not None:
os.makedirs(args.results_path, exist_ok=True)
output_path = os.path.join(args.results_path, 'generate-{}.txt'.format(args.gen_subset))
with open(output_path, 'w', buffering=1) as h:
return _main(args, h)
else:
return _main(args, sys.stdout)
def _main(args, output_file):
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=output_file,
)
logger = logging.getLogger('fairnr_cli.render')
utils.import_user_module(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Load ensemble
logger.info('loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(os.pathsep),
arg_overrides=eval(args.model_overrides),
task=task,
)
# Optimize ensemble for generation
for model in models:
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
# Initialize generator
gen_timer = StopwatchMeter()
generator = task.build_generator(args)
output_files, step= [], 0
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for i, sample in enumerate(t):
sample = utils.move_to_cuda(sample) if use_cuda else sample
gen_timer.start()
step, _output_files = task.inference_step(generator, models, [sample, step])
output_files += _output_files
gen_timer.stop(500)
wps_meter.update(500)
t.log({'wps': round(wps_meter.avg)})
break
# if i > 5:
# break
generator.save_images(output_files, combine_output=args.render_combine_output)
def cli_main():
parser = options.get_rendering_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == '__main__':
cli_main()
| 3,570 | 28.03252 | 96 | py |
NSVF | NSVF-main/fairnr_cli/render_multigpu.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This is a simplified copy of fairseq-generate for other usage.
"""
import logging
import math
import os
import sys
import time
import torch
import imageio
import numpy as np
from fairseq import checkpoint_utils, progress_bar, tasks, utils, distributed_utils
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.options import add_distributed_training_args
from fairnr import options
def main(args, *kwargs):
assert args.path is not None, '--path required for generation!'
if args.results_path is not None:
os.makedirs(args.results_path, exist_ok=True)
output_path = os.path.join(args.results_path, 'generate-{}.txt'.format(args.gen_subset))
with open(output_path, 'w', buffering=1) as h:
return _main(args, h)
else:
return _main(args, sys.stdout)
def _main(args, output_file):
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=output_file,
)
logger = logging.getLogger('fairnr_cli.render')
utils.import_user_module(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Load ensemble
logger.info('loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(os.pathsep),
arg_overrides=eval(args.model_overrides),
task=task,
)
# Optimize ensemble for generation
for model in models:
if args.fp16:
model.half()
if use_cuda:
model.cuda()
logging.info(model)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_workers=args.num_workers
).next_epoch_itr(shuffle=False)
# Initialize generator
gen_timer = StopwatchMeter()
generator = task.build_generator(args)
shard_id, world_size = args.distributed_rank, args.distributed_world_size
output_files = []
if generator.test_poses is not None:
total_frames = generator.test_poses.shape[0]
_frames = int(np.floor(total_frames / world_size))
step = shard_id * _frames
frames = _frames if shard_id < (world_size - 1) else total_frames - step
else:
step = shard_id * args.render_num_frames
frames = args.render_num_frames
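    # Editor's note: each worker renders a contiguous chunk of the requested frames
    # (rank `shard_id` starts at frame `step` and produces `frames` images); rank 0
    # merges the per-shard videos once all shards have finished.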
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for i, sample in enumerate(t):
sample = utils.move_to_cuda(sample) if use_cuda else sample
gen_timer.start()
step, _output_files = task.inference_step(
generator, models, [sample, step, frames])
output_files += _output_files
gen_timer.stop(500)
wps_meter.update(500)
t.log({'wps': round(wps_meter.avg)})
timestamp = generator.save_images(
output_files, steps='shard{}'.format(shard_id), combine_output=args.render_combine_output)
# join videos from all GPUs and delete temp files
try:
timestamps = distributed_utils.all_gather_list(timestamp)
except:
timestamps = [timestamp]
if shard_id == 0:
generator.merge_videos(timestamps)
def cli_main():
parser = options.get_rendering_parser()
add_distributed_training_args(parser)
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(args, main)
if __name__ == '__main__':
cli_main()
| 4,399 | 29.985915 | 98 | py |
NSVF | NSVF-main/fairnr_cli/validate.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import sys
import numpy as np
import torch
from itertools import chain
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.logging import metrics, progress_bar
from fairseq.options import add_distributed_training_args
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('fairnr_cli.validate')
def main(args, override_args=None):
utils.import_user_module(args)
assert args.max_tokens is not None or args.max_sentences is not None, \
'Must specify batch size either with --max-tokens or --max-sentences'
use_fp16 = args.fp16
use_cuda = torch.cuda.is_available() and not args.cpu
if override_args is not None:
try:
override_args = override_args['override_args']
except TypeError:
override_args = override_args
overrides = vars(override_args)
overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
else:
overrides = None
# Load ensemble
logger.info('loading model(s) from {}'.format(args.path))
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path],
arg_overrides=overrides,
suffix=getattr(args, "checkpoint_suffix", ""),
)
model = models[0]
# Move models to GPU
for model in models:
if use_fp16:
model.half()
if use_cuda:
model.cuda()
# Print args
logger.info(model_args)
# Build criterion
criterion = task.build_criterion(model_args)
if use_fp16:
criterion.half()
if use_cuda:
criterion.cuda()
criterion.eval()
for subset in args.valid_subset.split(','):
try:
task.load_dataset(subset, combine=False, epoch=1)
dataset = task.dataset(subset)
except KeyError:
raise Exception('Cannot find dataset: ' + subset)
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[m.max_positions() for m in models],
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_workers=args.num_workers,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
prefix=f"valid on '{subset}' subset",
default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
)
log_outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
sample = utils.apply_to_sample(
lambda t: t.half() if t.dtype is torch.float32 else t, sample) if use_fp16 else sample
try:
with torch.no_grad(): # do not save backward passes
max_num_rays = 900 * 900
if sample['uv'].shape[3] > max_num_rays:
sample['ray_split'] = sample['uv'].shape[3] // max_num_rays
_loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
progress.log(log_output, step=i)
log_outputs.append(log_output)
except TypeError:
break
with metrics.aggregate() as agg:
task.reduce_metrics(log_outputs, criterion)
log_output = agg.get_smoothed_values()
# summarize all the gpus
if args.distributed_world_size > 1:
all_log_output = list(zip(*distributed_utils.all_gather_list([log_output])))[0]
log_output = {
key: np.mean([log[key] for log in all_log_output])
for key in all_log_output[0]
}
progress.print(log_output, tag=subset, step=i)
def cli_main():
parser = options.get_validation_parser()
args = options.parse_args_and_arch(parser)
# only override args that are explicitly given on the command line
override_parser = options.get_validation_parser()
override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)
# support multi-gpu validation, use all available gpus
default_world_size = max(1, torch.cuda.device_count())
if args.distributed_world_size < default_world_size:
args.distributed_world_size = default_world_size
override_args.distributed_world_size = default_world_size
distributed_utils.call_main(args, main, override_args=override_args)
if __name__ == '__main__':
cli_main()
| 5,384 | 33.082278 | 102 | py |
NSVF | NSVF-main/fairnr_cli/extract.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This code is used to extract voxels/meshes from the learned model
"""
import logging
import numpy as np
import torch
import sys, os
import argparse
from fairseq import options
from fairseq import checkpoint_utils
from plyfile import PlyData, PlyElement
def main(args):
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('fairnr_cli.extract')
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path], suffix=getattr(args, "checkpoint_suffix", ""))
model = models[0]
if use_cuda:
model.cuda()
if args.format == 'mc_mesh':
plydata = model.encoder.export_surfaces(
model.field, th=args.mc_threshold,
bits=2 * args.mc_num_samples_per_halfvoxel)
elif args.format == 'voxel_center':
plydata = model.encoder.export_voxels(False)
elif args.format == 'voxel_mesh':
plydata = model.encoder.export_voxels(True)
else:
raise NotImplementedError
# write to ply file.
if not os.path.exists(args.output):
os.makedirs(args.output)
plydata.text = args.savetext
plydata.write(open(os.path.join(args.output, args.name + '.ply'), 'wb'))
def cli_main():
parser = argparse.ArgumentParser(description='Extract geometry from a trained model (only for learnable embeddings).')
parser.add_argument('--path', type=str, required=True)
parser.add_argument('--output', type=str, required=True)
parser.add_argument('--name', type=str, default='sparsevoxel')
parser.add_argument('--format', type=str, choices=['voxel_center', 'voxel_mesh', 'mc_mesh'])
parser.add_argument('--savetext', action='store_true', help='save .ply in plain text')
parser.add_argument('--mc-num-samples-per-halfvoxel', type=int, default=8,
help="""the number of point samples every half voxel-size for marching cube.
For instance, by setting to 8, it will use (8 x 2) ^ 3 = 4096 points to compute density for each voxel.
In practise, the larger this number is, the more accurate surface you get.
""")
parser.add_argument('--mc-threshold', type=float, default=0.5,
help="""the threshold used to find the isosurface from the learned implicit field.
In our implementation, we define our values as ``1 - exp(-max(0, density))``
where "0" is empty and "1" is fully occupied.
""")
parser.add_argument('--user-dir', default='fairnr')
parser.add_argument('--cpu', action='store_true')
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == '__main__':
cli_main()
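# Example invocation (editor's sketch; the checkpoint path is a placeholder and the
# repository root is assumed to be on PYTHONPATH):
#   python -m fairnr_cli.extract --path /path/to/checkpoint_last.pt \
#       --output ./geometry --name sparsevoxel --format mc_mesh --mc-threshold 0.5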
| 3,180 | 38.7625 | 127 | py |
NSVF | NSVF-main/fairnr_cli/train.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
This file is mostly copied from the original fairseq code
"""
import logging
import math
import os
import random
import sys
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.trainer import Trainer
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairnr import ResetTrainerException
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger('fairnr_cli.train')
def main(args, init_distributed=False):
utils.import_user_module(args)
assert args.max_tokens is not None or args.max_sentences is not None, \
'Must specify batch size either with --max-tokens or --max-sentences'
metrics.reset()
# Initialize CUDA and distributed training
if torch.cuda.is_available() and not args.cpu:
torch.cuda.set_device(args.device_id)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if init_distributed:
args.distributed_rank = distributed_utils.distributed_init(args)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(','):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info('model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
logger.info('num. model params: {} (num. trained: {})'.format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
))
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion)
else:
trainer = MegatronTrainer(args, task, model, criterion)
task.setup_trainer(trainer)
logger.info('training on {} GPUs'.format(args.distributed_world_size))
logger.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(
args.max_tokens,
args.max_sentences,
))
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
valid_subsets = args.valid_subset.split(',')
while (
lr > args.min_lr
and epoch_itr.next_epoch_idx <= max_epoch
):
# train for one epoch
should_end_training = train(args, trainer, task, epoch_itr)
valid_losses = validate_and_save(args, trainer, task, epoch_itr, valid_subsets)
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=(os.pathsep in getattr(args, 'data', '')),
)
if should_end_training:
break
train_meter.stop()
logger.info('done training in {:.1f} seconds'.format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, 'best', None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info('early stop since valid performance hasn\'t improved for last {} runs'.format(args.patience))
return True
else:
return False
@metrics.aggregate('train')
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
)
# task specific setup per epoch
task.begin_epoch(epoch_itr.epoch, trainer.get_model())
valid_subsets = args.valid_subset.split(',')
max_update = args.max_update or math.inf
should_end_training = False
for samples in progress:
with metrics.aggregate('train_inner'):
try:
log_output = trainer.train_step(samples)
except ResetTrainerException:
trainer._wrapped_criterion = None
trainer._wrapped_model = None
trainer._optimizer = None
logger.info("reset the trainer at {}".format(trainer.get_num_updates()))
log_output = trainer.train_step(samples)
if log_output is None: # OOM, overflow, ...
continue
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values('train_inner'))
progress.log(stats, tag='train_inner', step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters('train_inner')
valid_losses = validate_and_save(args, trainer, task, epoch_itr, valid_subsets)
if should_stop_early(args, valid_losses[0]) or num_updates >= max_update:
should_end_training = True
break
# log end-of-epoch stats
stats = get_training_stats(metrics.get_smoothed_values('train'))
progress.print(stats, tag='train', step=num_updates)
# reset epoch-level meters
metrics.reset_meters('train')
return should_end_training
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets):
num_updates = trainer.get_num_updates()
do_save = (
(
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
)
or (
epoch_itr.end_of_epoch()
and epoch_itr.epoch % args.save_interval == 0
)
)
do_validate = (
(
do_save # saving requires validation
or (
epoch_itr.end_of_epoch()
and epoch_itr.epoch % args.validate_interval == 0
)
)
and not args.disable_validation
)
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
# Save
if do_save:
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses
def get_training_stats(stats):
if 'nll_loss' in stats and 'ppl' not in stats:
stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
stats['wall'] = round(metrics.get_meter('default', 'wall').elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
# reset dummy batch only for validation
trainer._dummy_batch = "DUMMY" # reset dummy batch
valid_losses = []
for subset in subsets:
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=task.dataset(subset),
max_tokens=args.max_tokens_valid,
max_sentences=args.max_sentences_valid,
max_positions=utils.resolve_max_positions(
task.max_positions(),
trainer.get_model().max_positions(),
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for step, sample in enumerate(progress):
trainer.valid_step(sample)
stats = get_training_stats(agg.get_smoothed_values())
plog = progress.log
if hasattr(progress, "wrapped_bar"):
plog = progress.wrapped_bar.log
plog(stats, tag='valid', step=step)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
# reset dummy batch again for continuing training
trainer._dummy_batch = "DUMMY"
return valid_losses
def get_valid_stats(args, trainer, stats):
if 'nll_loss' in stats and 'ppl' not in stats:
stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
stats['num_updates'] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, 'best'):
key = 'best_{0}'.format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[args.best_checkpoint_metric],
)
return stats
def distributed_main(i, args, start_rank=0):
args.device_id = i
if args.distributed_rank is None: # torch.multiprocessing.spawn
args.distributed_rank = start_rank + i
main(args, init_distributed=True)
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.distributed_init_method is None:
distributed_utils.infer_init_method(args)
if args.distributed_init_method is not None:
# distributed training
if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
start_rank = args.distributed_rank
args.distributed_rank = None # assign automatically
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, start_rank),
nprocs=torch.cuda.device_count(),
)
else:
distributed_main(args.device_id, args)
elif args.distributed_world_size > 1:
# fallback for single node with multiple GPUs
assert args.distributed_world_size <= torch.cuda.device_count()
port = random.randint(10000, 20000)
args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
args.distributed_rank = None # set based on device id
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, ),
nprocs=args.distributed_world_size,
)
else:
# single GPU training
main(args)
if __name__ == '__main__':
cli_main()
| 13,414 | 34.489418 | 117 | py |
RegularizedBN | RegularizedBN-main/setup.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from setuptools import setup, find_packages, Extension
import sys
if sys.version_info < (3, 6):
sys.exit('Sorry, Python >= 3.6 is required for fairseq.')
with open('README.md') as f:
readme = f.read()
if sys.platform == 'darwin':
extra_compile_args = ['-stdlib=libc++', '-O3']
else:
extra_compile_args = ['-std=c++11', '-O3']
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
'fairseq.libbleu',
sources=[
'fairseq/clib/libbleu/libbleu.cpp',
'fairseq/clib/libbleu/module.cpp',
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
'fairseq.data.data_utils_fast',
sources=['fairseq/data/data_utils_fast.pyx'],
language='c++',
extra_compile_args=extra_compile_args,
),
NumpyExtension(
'fairseq.data.token_block_utils_fast',
sources=['fairseq/data/token_block_utils_fast.pyx'],
language='c++',
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend([
cpp_extension.CppExtension(
'fairseq.libnat',
sources=[
'fairseq/clib/libnat/edit_dist.cpp',
],
)
])
if 'CUDA_HOME' in os.environ:
extensions.extend([
cpp_extension.CppExtension(
'fairseq.libnat_cuda',
sources=[
'fairseq/clib/libnat_cuda/edit_dist.cu',
'fairseq/clib/libnat_cuda/binding.cpp'
],
)])
cmdclass['build_ext'] = cpp_extension.BuildExtension
except ImportError:
pass
if 'READTHEDOCS' in os.environ:
# don't build extensions when generating docs
extensions = []
if 'build_ext' in cmdclass:
del cmdclass['build_ext']
# use CPU build of PyTorch
dependency_links = [
'https://download.pytorch.org/whl/cpu/torch-1.3.0%2Bcpu-cp36-cp36m-linux_x86_64.whl'
]
else:
dependency_links = []
if 'clean' in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(['rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd'], shell=True)
setup(
name='fairseq',
version='0.9.0',
description='Facebook AI Research Sequence-to-Sequence Toolkit',
url='https://github.com/pytorch/fairseq',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
long_description=readme,
long_description_content_type='text/markdown',
setup_requires=[
'cython',
'numpy',
'setuptools>=18.0',
],
install_requires=[
'cffi',
'cython',
'editdistance',
'numpy',
'regex',
'sacrebleu',
'torch',
'tqdm',
],
dependency_links=dependency_links,
packages=find_packages(exclude=['scripts', 'tests']),
ext_modules=extensions,
test_suite='tests',
entry_points={
'console_scripts': [
'fairseq-eval-lm = fairseq_cli.eval_lm:cli_main',
'fairseq-generate = fairseq_cli.generate:cli_main',
'fairseq-interactive = fairseq_cli.interactive:cli_main',
'fairseq-preprocess = fairseq_cli.preprocess:cli_main',
'fairseq-score = fairseq_cli.score:cli_main',
'fairseq-train = fairseq_cli.train:cli_main',
'fairseq-validate = fairseq_cli.validate:cli_main',
],
},
cmdclass=cmdclass,
zip_safe=False,
)
| 4,389 | 25.768293 | 101 | py |
RegularizedBN | RegularizedBN-main/hubconf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from fairseq.hub_utils import BPEHubInterface as bpe # noqa
from fairseq.hub_utils import TokenizerHubInterface as tokenizer # noqa
from fairseq.models import MODEL_REGISTRY
dependencies = [
'numpy',
'regex',
'requests',
'torch',
]
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
import fairseq.data.token_block_utils_fast
except (ImportError, ModuleNotFoundError):
try:
import cython
import os
from setuptools import sandbox
sandbox.run_setup(
os.path.join(os.path.dirname(__file__), 'setup.py'),
['build_ext', '--inplace'],
)
except (ImportError, ModuleNotFoundError):
print(
'Unable to build Cython components. Please make sure Cython is '
'installed if the torch.hub model you are loading depends on it.'
)
for _model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name,
)
# to simplify the interface we only expose named models
# globals()[_model_type] = _cls.from_pretrained
| 1,432 | 28.244898 | 78 | py |
RegularizedBN | RegularizedBN-main/examples/wav2vec/vq-wav2vec_featurize.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import pprint
import glob, os, argparse
import torch
from torch import nn
try:
import tqdm
except:
print("Install tqdm to use --log-format=tqdm")
from fairseq.models.wav2vec.wav2vec import Wav2VecModel
import tqdm
import soundfile as sf
from torch.utils.data import DataLoader
import os.path as osp
class FilesDataset:
def __init__(self, files, labels):
self.files = files
if labels and osp.exists(labels):
with open(labels, 'r') as lbl_f:
self.labels = [line.rstrip() for line in lbl_f]
else:
self.labels = labels
def __len__(self):
return len(self.files)
def __getitem__(self, index):
fname = self.files[index]
wav, sr = sf.read(fname)
assert sr == 16000
wav = torch.from_numpy(wav).float()
lbls = None
if self.labels:
if isinstance(self.labels, str):
lbl_file = osp.splitext(fname)[0] + "." + self.labels
with open(lbl_file, 'r') as lblf:
lbls = lblf.readline()
assert lbls is not None
else:
lbls = self.labels[index]
return wav, lbls
def collate(self, batch):
return batch
class ArgTypes:
@staticmethod
def existing_path(arg):
arg = str(arg)
assert osp.exists(arg), f"File {arg} does not exist"
return arg
@staticmethod
def mkdir(arg):
arg = str(arg)
os.makedirs(arg, exist_ok=True)
return arg
class DatasetWriter:
def __init__(self):
self.args = self.load_config()
pprint.pprint(self.args.__dict__)
self.model = self.load_model()
def __getattr__(self, attr):
return getattr(self.args, attr)
def read_manifest(self, fname):
with open(fname, "r") as fp:
lines = fp.read().split("\n")
root = lines.pop(0).strip()
fnames = [
osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0
]
return fnames
def process_splits(self):
if self.args.shard is not None or self.args.num_shards is not None:
assert self.args.shard is not None and self.args.num_shards is not None
for split in self.splits:
print(split)
if self.extension == "tsv":
datadir = osp.join(self.data_dir, f"{split}.{self.extension}")
print("Reading manifest file: ", datadir)
files = self.read_manifest(datadir)
else:
datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}")
files = glob.glob(datadir, recursive=True)
assert len(files) > 0
if self.args.shard is not None:
files = files[self.args.shard::self.args.num_shards]
lbls = []
with open(self.data_file(split), 'w') as srcf:
for line, lbl in self.iterate(files):
print(line, file=srcf)
if self.args.labels:
lbls.append(lbl + '\n')
if self.args.labels:
assert all(a is not None for a in lbls)
with open(self.lbl_file(split), 'w') as lblf:
lblf.writelines(lbls)
def iterate(self, files):
data = self.load_data(files)
for samples in tqdm.tqdm(data, total=len(files)//32):
for wav, lbl in samples:
x = wav.unsqueeze(0).float().cuda()
div = 1
while x.size(-1) // div > self.args.max_size:
div += 1
xs = x.chunk(div, dim=-1)
result = []
for x in xs:
torch.cuda.empty_cache()
x = self.model.feature_extractor(x)
if self.quantize_location == "encoder":
with torch.no_grad():
_, idx = self.model.vector_quantizer.forward_idx(x)
idx = idx.squeeze(0).cpu()
else:
with torch.no_grad():
z = self.model.feature_aggregator(x)
_, idx = self.model.vector_quantizer.forward_idx(z)
idx = idx.squeeze(0).cpu()
result.append(idx)
idx = torch.cat(result, dim=0)
yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl
def lbl_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.lbl{shard_part}")
def data_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.src{shard_part}")
def var_file(self):
return osp.join(self.output_dir, f"vars.pt")
def load_config(self):
parser = argparse.ArgumentParser("Vector Quantized wav2vec features")
# Model Arguments
parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True)
parser.add_argument("--data-parallel", action="store_true")
# Output Arguments
parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True)
# Data Arguments
parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True)
parser.add_argument("--splits", type=str, nargs="+", required=True)
parser.add_argument("--extension", type=str, required=True)
parser.add_argument("--labels", type=str, required=False)
parser.add_argument("--shard", type=int, default=None)
parser.add_argument("--num-shards", type=int, default=None)
parser.add_argument("--max-size", type=int, default=1300000)
# Logger Arguments
parser.add_argument(
"--log-format", type=str, choices=["none", "simple", "tqdm"]
)
return parser.parse_args()
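    # Example invocation (illustrative sketch; the script name, paths and label extension
    # are placeholders rather than documented values):
    #   python vq-wav2vec_featurize.py --checkpoint /path/to/vq-wav2vec.pt \
    #       --data-dir /path/to/manifests --splits train valid --extension tsv \
    #       --labels ltr --output-dir /path/to/features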
def load_data(self, fnames):
dataset = FilesDataset(fnames, self.args.labels)
loader = DataLoader(
dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8
)
return loader
def load_model(self):
cp = torch.load(self.checkpoint, map_location=lambda x, _: x)
model = Wav2VecModel.build_model(cp["args"], None)
self.quantize_location = getattr(cp["args"], "vq", "encoder")
model.load_state_dict(cp["model"])
model.eval().float()
model.cuda()
if self.data_parallel:
model = nn.DataParallel(model)
return model
def __call__(self):
self.process_splits()
if hasattr(self.model.feature_extractor, "vars") and (self.args.shard is None or self.args.shard == 0):
vars = (
self.model.feature_extractor.vars.view(
self.model.feature_extractor.banks,
self.model.feature_extractor.num_vars,
-1,
)
.cpu()
.detach()
)
print("writing learned latent variable embeddings: ", vars.shape)
torch.save(vars, self.var_file())
if __name__ == "__main__":
write_data = DatasetWriter()
write_data()
print("Done.") | 7,714 | 29.737052 | 111 | py |
RegularizedBN | RegularizedBN-main/examples/wav2vec/wav2vec_featurize.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import soundfile as sf
import numpy as np
import torch
from torch import nn
import tqdm
from fairseq.models.wav2vec.wav2vec import Wav2VecModel
def read_audio(fname):
""" Load an audio file and return PCM along with the sample rate """
wav, sr = sf.read(fname)
assert sr == 16e3
return wav, 16e3
class PretrainedWav2VecModel(nn.Module):
def __init__(self, fname):
super().__init__()
checkpoint = torch.load(fname)
self.args = checkpoint["args"]
model = Wav2VecModel.build_model(self.args, None)
model.load_state_dict(checkpoint["model"])
model.eval()
self.model = model
def forward(self, x):
with torch.no_grad():
z = self.model.feature_extractor(x)
if isinstance(z, tuple):
z = z[0]
c = self.model.feature_aggregator(z)
return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
def __init__(self):
super().__init__("Pre-compute embeddings for wav2letter++ datasets")
kwargs = {"action": "store", "type": str, "required": True}
self.add_argument("--input", "-i",
help="Input Directory", **kwargs)
self.add_argument("--output", "-o",
help="Output Directory", **kwargs)
self.add_argument("--model",
help="Path to model checkpoint", **kwargs)
self.add_argument("--split",
help="Dataset Splits", nargs='+', **kwargs)
self.add_argument("--ext", default="wav", required=False,
help="Audio file extension")
self.add_argument("--no-copy-labels", action="store_true",
help="Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.")
self.add_argument("--use-feat", action="store_true",
help="Use the feature vector ('z') instead of context vector ('c') for features")
self.add_argument("--gpu",
help="GPU to use", default=0, type=int)
class Prediction():
""" Lightweight wrapper around a fairspeech embedding model """
def __init__(self, fname, gpu=0):
self.gpu = gpu
self.model = PretrainedWav2VecModel(fname).cuda(gpu)
def __call__(self, x):
x = torch.from_numpy(x).float().cuda(self.gpu)
with torch.no_grad():
z, c = self.model(x.unsqueeze(0))
return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
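# Minimal usage sketch (illustrative; the checkpoint and audio paths are placeholders):
#   predict = Prediction("/path/to/wav2vec_large.pt", gpu=0)
#   wav, sr = read_audio("/path/to/clip.wav")
#   z, c = predict(wav)  # latent ("z") and context ("c") features, each a (C, T) numpy array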
class H5Writer():
""" Write features as hdf5 file in wav2letter++ compatible format """
def __init__(self, fname):
self.fname = fname
os.makedirs(os.path.dirname(self.fname), exist_ok=True)
def write(self, data):
channel, T = data.shape
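        # wav2letter++ expects the features flattened in time-major order plus an
        # "info" vector of [frame rate in frames/sec, num frames, num channels].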
with h5py.File(self.fname, "w") as out_ds:
data = data.T.flatten()
out_ds["features"] = data
out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
""" Given a model and a wav2letter++ dataset, pre-compute and store embeddings
Args:
input_root, str :
Path to the wav2letter++ dataset
output_root, str :
Desired output directory. Will be created if non-existent
split, str :
Dataset split
"""
def __init__(self, input_root, output_root, split,
model_fname,
extension="wav",
gpu=0,
verbose=False,
use_feat=False,
):
assert os.path.exists(model_fname)
self.model_fname = model_fname
self.model = Prediction(self.model_fname, gpu)
self.input_root = input_root
self.output_root = output_root
self.split = split
self.verbose = verbose
self.extension = extension
self.use_feat = use_feat
assert os.path.exists(self.input_path), \
"Input path '{}' does not exist".format(self.input_path)
def _progress(self, iterable, **kwargs):
if self.verbose:
return tqdm.tqdm(iterable, **kwargs)
return iterable
def require_output_path(self, fname=None):
path = self.get_output_path(fname)
os.makedirs(path, exist_ok=True)
@property
def input_path(self):
return self.get_input_path()
@property
def output_path(self):
return self.get_output_path()
def get_input_path(self, fname=None):
if fname is None:
return os.path.join(self.input_root, self.split)
return os.path.join(self.get_input_path(), fname)
def get_output_path(self, fname=None):
if fname is None:
return os.path.join(self.output_root, self.split)
return os.path.join(self.get_output_path(), fname)
def copy_labels(self):
self.require_output_path()
labels = list(filter(lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))))
for fname in tqdm.tqdm(labels):
copy(fname, self.output_path)
@property
def input_fnames(self):
return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))
def __len__(self):
return len(self.input_fnames)
def write_features(self):
paths = self.input_fnames
fnames_context = map(lambda x: os.path.join(self.output_path, x.replace("." + self.extension, ".h5context")), \
map(os.path.basename, paths))
for name, target_fname in self._progress(zip(paths, fnames_context), total=len(self)):
wav, sr = read_audio(name)
z, c = self.model(wav)
feat = z if self.use_feat else c
writer = H5Writer(target_fname)
writer.write(feat)
def __repr__(self):
return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
n_files=len(self), **self.__dict__)
if __name__ == "__main__":
args = EmbeddingWriterConfig().parse_args()
for split in args.split:
writer = EmbeddingDatasetWriter(
input_root=args.input,
output_root=args.output,
split=split,
model_fname=args.model,
gpu=args.gpu,
extension=args.ext,
use_feat=args.use_feat,
)
print(writer)
writer.require_output_path()
print("Writing Features...")
writer.write_features()
print("Done.")
if not args.no_copy_labels:
print("Copying label data...")
writer.copy_labels()
print("Done.")
| 7,110 | 29.004219 | 135 | py |
RegularizedBN | RegularizedBN-main/examples/translation_moe/src/mean_pool_gating_network.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
class MeanPoolGatingNetwork(torch.nn.Module):
"""A simple mean-pooling gating network for selecting experts.
This module applies mean pooling over an encoder's output and returns
    responsibilities for each expert. The encoder format is expected to match
:class:`fairseq.models.transformer.TransformerEncoder`.
"""
def __init__(self, embed_dim, num_experts, dropout=None):
super().__init__()
self.embed_dim = embed_dim
self.num_experts = num_experts
self.fc1 = torch.nn.Linear(embed_dim, embed_dim)
self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None
self.fc2 = torch.nn.Linear(embed_dim, num_experts)
def forward(self, encoder_out):
if not (
hasattr(encoder_out, 'encoder_out')
and hasattr(encoder_out, 'encoder_padding_mask')
and encoder_out.encoder_out.size(2) == self.embed_dim
):
raise ValueError('Unexpected format for encoder_out')
# mean pooling over time
encoder_padding_mask = encoder_out.encoder_padding_mask # B x T
encoder_out = encoder_out.encoder_out.transpose(0, 1) # B x T x C
if encoder_padding_mask is not None:
encoder_out = encoder_out.clone() # required because of transpose above
encoder_out[encoder_padding_mask] = 0
ntokens = torch.sum(~encoder_padding_mask, dim=1, keepdim=True)
x = torch.sum(encoder_out, dim=1) / ntokens.type_as(encoder_out)
else:
x = torch.mean(encoder_out, dim=1)
x = torch.tanh(self.fc1(x))
if self.dropout is not None:
x = self.dropout(x)
x = self.fc2(x)
return F.log_softmax(x, dim=-1, dtype=torch.float32).type_as(x)
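# Shape sketch (illustrative): with embed_dim=512 and num_experts=3, a batch of encoder
# states (T x B x 512) is mean-pooled over time to (B, 512) and mapped to (B, 3)
# log-probabilities over the experts.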
| 2,007 | 38.372549 | 84 | py |
RegularizedBN | RegularizedBN-main/examples/translation_moe/src/logsumexp_moe.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class LogSumExpMoE(torch.autograd.Function):
"""Standard LogSumExp forward pass, but use *posterior* for the backward.
See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
(Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
"""
@staticmethod
def forward(ctx, logp, posterior, dim=-1):
ctx.save_for_backward(posterior)
ctx.dim = dim
return torch.logsumexp(logp, dim=dim)
@staticmethod
def backward(ctx, grad_output):
posterior, = ctx.saved_tensors
grad_logp = grad_output.unsqueeze(ctx.dim) * posterior
return grad_logp, None, None
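# Usage sketch (mirrors the call in translation_moe.py): given per-expert log-likelihoods
# lprob_yz (B x K) and fixed posterior responsibilities prob_z_xy (B x K),
#   loss = -LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1)
# computes the marginal NLL while using the posterior (not the softmax of lprob_yz)
# to weight the gradients in the backward pass.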
| 835 | 29.962963 | 78 | py |
RegularizedBN | RegularizedBN-main/examples/translation_moe/src/translation_moe.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import metrics, utils
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
from .logsumexp_moe import LogSumExpMoE
from .mean_pool_gating_network import MeanPoolGatingNetwork
@register_task('translation_moe')
class TranslationMoETask(TranslationTask):
"""
Translation task for Mixture of Experts (MoE) models.
See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
(Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument('--method', default='hMoEup',
choices=['sMoElp', 'sMoEup', 'hMoElp', 'hMoEup'])
parser.add_argument('--num-experts', default=3, type=int, metavar='N',
help='number of experts')
parser.add_argument('--mean-pool-gating-network', action='store_true',
help='use a simple mean-pooling gating network')
parser.add_argument('--mean-pool-gating-network-dropout', type=float,
help='dropout for mean-pooling gating network')
parser.add_argument('--mean-pool-gating-network-encoder-dim', type=float,
help='encoder output dim for mean-pooling gating network')
parser.add_argument('--gen-expert', type=int, default=0,
help='which expert to use for generation')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
if args.method == 'sMoElp':
# soft MoE with learned prior
self.uniform_prior = False
self.hard_selection = False
elif args.method == 'sMoEup':
# soft MoE with uniform prior
self.uniform_prior = True
self.hard_selection = False
elif args.method == 'hMoElp':
# hard MoE with learned prior
self.uniform_prior = False
self.hard_selection = True
elif args.method == 'hMoEup':
# hard MoE with uniform prior
self.uniform_prior = True
self.hard_selection = True
# add indicator tokens for each expert
for i in range(args.num_experts):
# add to both dictionaries in case we're sharing embeddings
src_dict.add_symbol('<expert_{}>'.format(i))
tgt_dict.add_symbol('<expert_{}>'.format(i))
super().__init__(args, src_dict, tgt_dict)
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
if not self.uniform_prior and not hasattr(model, 'gating_network'):
if self.args.mean_pool_gating_network:
if getattr(args, 'mean_pool_gating_network_encoder_dim', None):
encoder_dim = args.mean_pool_gating_network_encoder_dim
elif getattr(args, 'encoder_embed_dim', None):
# assume that encoder_embed_dim is the encoder's output dimension
encoder_dim = args.encoder_embed_dim
else:
raise ValueError('Must specify --mean-pool-gating-network-encoder-dim')
if getattr(args, 'mean_pool_gating_network_dropout', None):
dropout = args.mean_pool_gating_network_dropout
elif getattr(args, 'dropout', None):
dropout = args.dropout
else:
raise ValueError('Must specify --mean-pool-gating-network-dropout')
model.gating_network = MeanPoolGatingNetwork(
encoder_dim, args.num_experts, dropout,
)
else:
raise ValueError(
'translation_moe task with learned prior requires the model to '
'have a gating network; try using --mean-pool-gating-network'
)
return model
def expert_index(self, i):
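        # Expert i is selected by replacing the first decoder input token (BOS) with the
        # corresponding '<expert_i>' symbol registered on the dictionaries in __init__.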
return i + self.tgt_dict.index('<expert_0>')
def _get_loss(self, sample, model, criterion):
assert hasattr(criterion, 'compute_loss'), \
'translation_moe task requires the criterion to implement the compute_loss() method'
k = self.args.num_experts
bsz = sample['target'].size(0)
def get_lprob_y(encoder_out, prev_output_tokens_k):
net_output = model.decoder(
prev_output_tokens=prev_output_tokens_k,
encoder_out=encoder_out,
)
loss, _ = criterion.compute_loss(model, net_output, sample, reduce=False)
loss = loss.view(bsz, -1)
return -loss.sum(dim=1, keepdim=True) # -> B x 1
def get_lprob_yz(winners=None):
encoder_out = model.encoder(
src_tokens=sample['net_input']['src_tokens'],
src_lengths=sample['net_input']['src_lengths'],
)
if winners is None:
lprob_y = []
for i in range(k):
prev_output_tokens_k = sample['net_input']['prev_output_tokens'].clone()
assert not prev_output_tokens_k.requires_grad
prev_output_tokens_k[:, 0] = self.expert_index(i)
lprob_y.append(get_lprob_y(encoder_out, prev_output_tokens_k))
lprob_y = torch.cat(lprob_y, dim=1) # -> B x K
else:
prev_output_tokens_k = sample['net_input']['prev_output_tokens'].clone()
prev_output_tokens_k[:, 0] = self.expert_index(winners)
lprob_y = get_lprob_y(encoder_out, prev_output_tokens_k) # -> B
if self.uniform_prior:
lprob_yz = lprob_y
else:
lprob_z = model.gating_network(encoder_out) # B x K
if winners is not None:
lprob_z = lprob_z.gather(dim=1, index=winners.unsqueeze(-1))
lprob_yz = lprob_y + lprob_z.type_as(lprob_y) # B x K
return lprob_yz
# compute responsibilities without dropout
with utils.eval(model): # disable dropout
with torch.no_grad(): # disable autograd
lprob_yz = get_lprob_yz() # B x K
prob_z_xy = torch.nn.functional.softmax(lprob_yz, dim=1)
assert not prob_z_xy.requires_grad
# compute loss with dropout
if self.hard_selection:
winners = prob_z_xy.max(dim=1)[1]
loss = -get_lprob_yz(winners)
else:
lprob_yz = get_lprob_yz() # B x K
loss = -LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1)
loss = loss.sum()
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data),
'ntokens': sample['ntokens'],
'nsentences': bsz,
'sample_size': sample_size,
'posterior': prob_z_xy.float().sum(dim=0).cpu(),
}
return loss, sample_size, logging_output
def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
model.train()
loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None, expert=None, constraints=None):
        expert = expert if expert is not None else self.args.gen_expert
with torch.no_grad():
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
bos_token=self.expert_index(expert),
)
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
metrics.log_scalar(
'posterior',
sum(log['posterior'] for log in logging_outputs if 'posterior' in log)
)
| 9,137 | 40.348416 | 107 | py |
RegularizedBN | RegularizedBN-main/examples/roberta/commonsense_qa/commonsense_qa_task.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import numpy as np
import torch
from fairseq.data import (
data_utils,
Dictionary,
encoders,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
)
from fairseq.tasks import FairseqTask, register_task
@register_task('commonsense_qa')
class CommonsenseQATask(FairseqTask):
"""Task to finetune RoBERTa for Commonsense QA."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', metavar='DIR',
help='path to data directory; we load <split>.jsonl')
parser.add_argument('--init-token', type=int, default=None,
help='add token at the beginning of each batch item')
parser.add_argument('--num-classes', type=int, default=5)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol('<mask>')
self.bpe = encoders.build_bpe(args)
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol('<mask>')
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == 'sentence_ranking', 'Must set --criterion=sentence_ranking'
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(vocab)))
return cls(args, vocab)
def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def binarize(s, append_bos=False):
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s, append_eos=True, add_if_not_exist=False,
).long()
if append_bos and self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
if data_path is None:
data_path = os.path.join(self.args.data, split + '.jsonl')
if not os.path.exists(data_path):
raise FileNotFoundError('Cannot find data: {}'.format(data_path))
src_tokens = [[] for i in range(self.args.num_classes)]
src_lengths = [[] for i in range(self.args.num_classes)]
labels = []
with open(data_path) as h:
for line in h:
example = json.loads(line.strip())
if 'answerKey' in example:
label = ord(example['answerKey']) - ord('A')
labels.append(label)
question = example['question']['stem']
assert len(example['question']['choices']) == self.args.num_classes
# format: `<s> Q: Where would I not want a fox? </s> A: hen house </s>`
question = 'Q: ' + question
question_toks = binarize(question, append_bos=True)
for i, choice in enumerate(example['question']['choices']):
src = 'A: ' + choice['text']
src_bin = torch.cat([question_toks, binarize(src)])
src_tokens[i].append(src_bin)
src_lengths[i].append(len(src_bin))
assert all(len(src_tokens[0]) == len(src_tokens[i]) for i in range(self.args.num_classes))
assert len(src_tokens[0]) == len(src_lengths[0])
assert len(labels) == 0 or len(labels) == len(src_tokens[0])
for i in range(self.args.num_classes):
src_lengths[i] = np.array(src_lengths[i])
src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i])
src_lengths[i] = ListDataset(src_lengths[i])
dataset = {
'id': IdDataset(),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_tokens[0], reduce=True),
}
for i in range(self.args.num_classes):
dataset.update({
'net_input{}'.format(i + 1): {
'src_tokens': RightPadDataset(
src_tokens[i],
pad_idx=self.source_dictionary.pad(),
),
'src_lengths': src_lengths[i],
}
})
if len(labels) > 0:
dataset.update({'target': RawLabelDataset(labels)})
dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
with data_utils.numpy_seed(self.args.seed):
dataset = SortDataset(
dataset,
# shuffle
sort_order=[np.random.permutation(len(dataset))],
)
print('| Loaded {} with {} samples'.format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
'sentence_classification_head',
num_classes=1,
)
return model
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
| 5,921 | 32.84 | 103 | py |
RegularizedBN | RegularizedBN-main/examples/roberta/wsc/wsc_task.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import tempfile
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import (
data_utils,
Dictionary,
encoders,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
PadDataset,
SortDataset,
)
from fairseq.tasks import FairseqTask, register_task
from . import wsc_utils
@register_task('wsc')
class WSCTask(FairseqTask):
"""Task to finetune RoBERTa for Winograd Schemas."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', metavar='DIR',
help='path to data directory; we load <split>.jsonl')
parser.add_argument('--init-token', type=int, default=None,
help='add token at the beginning of each batch item')
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol('<mask>')
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
# hack to handle GPT-2 BPE, which includes leading spaces
if args.bpe == 'gpt2':
self.leading_space = True
self.trailing_space = False
else:
self.leading_space = False
self.trailing_space = True
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol('<mask>')
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == 'wsc', 'Must set --criterion=wsc'
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(vocab)))
return cls(args, vocab)
def binarize(self, s: str, append_eos: bool = False):
if self.tokenizer is not None:
s = self.tokenizer.encode(s)
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s, append_eos=append_eos, add_if_not_exist=False,
).long()
if self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
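        # Rebuild the sentence with `txt` substituted for the pronoun span and mark which
        # token positions belong to `txt`, so that only this span is masked and scored.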
toks = self.binarize(
prefix + leading_space + txt + trailing_space + suffix,
append_eos=True,
)
mask = torch.zeros_like(toks, dtype=torch.bool)
mask_start = len(self.binarize(prefix))
mask_size = len(self.binarize(leading_space + txt))
mask[mask_start:mask_start + mask_size] = 1
return toks, mask
def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + '.jsonl')
if not os.path.exists(data_path):
raise FileNotFoundError('Cannot find data: {}'.format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
labels = []
for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path):
prefix = sentence[:pronoun_span.start].text
suffix = sentence[pronoun_span.end:].text_with_ws
# spaCy spans include trailing spaces, but we need to know about
# leading spaces for the GPT-2 BPE
leading_space = ' ' if sentence[:pronoun_span.start].text_with_ws.endswith(' ') else ''
trailing_space = ' ' if pronoun_span.text_with_ws.endswith(' ') else ''
# get noun phrases, excluding pronouns and anything overlapping with the query
cand_spans = wsc_utils.filter_noun_chunks(
wsc_utils.extended_noun_chunks(sentence),
exclude_pronouns=True,
exclude_query=query,
exact_match=False,
)
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query, prefix, suffix, leading_space, trailing_space
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_masks = [], []
for cand_span in cand_spans:
toks, mask = self.binarize_with_mask(
cand_span.text, prefix, suffix, leading_space, trailing_space,
)
cand_toks.append(toks)
cand_masks.append(mask)
# collate candidates
cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
assert cand_toks.size() == cand_masks.size()
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_masks)
candidate_lengths.append(cand_toks.size(1))
labels.append(label)
query_lengths = np.array(query_lengths)
query_tokens = ListDataset(query_tokens, query_lengths)
query_masks = ListDataset(query_masks, query_lengths)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
candidate_masks = ListDataset(candidate_masks, candidate_lengths)
labels = ListDataset(labels, [1]*len(labels))
dataset = {
'id': IdDataset(),
'query_tokens': query_tokens,
'query_masks': query_masks,
'candidate_tokens': candidate_tokens,
'candidate_masks': candidate_masks,
'labels': labels,
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, sample_json):
with tempfile.NamedTemporaryFile(buffering=0) as h:
h.write((json.dumps(sample_json) + '\n').encode('utf-8'))
dataset = self.load_dataset(
'disambiguate_pronoun',
data_path=h.name,
return_only=True,
)
return dataset
def disambiguate_pronoun(self, model, sentence, use_cuda=False):
sample_json = wsc_utils.convert_sentence_to_json(sentence)
dataset = self.build_dataset_for_inference(sample_json)
sample = dataset.collater([dataset[0]])
if use_cuda:
sample = utils.move_to_cuda(sample)
def get_masked_input(tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask.bool()] = self.mask
return masked_tokens
def get_lprobs(tokens, mask):
logits, _ = model(src_tokens=get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
cand_lprobs = get_lprobs(
sample['candidate_tokens'][0],
sample['candidate_masks'][0],
)
if sample['query_tokens'][0] is not None:
query_lprobs = get_lprobs(
sample['query_tokens'][0].unsqueeze(0),
sample['query_masks'][0].unsqueeze(0),
)
return (query_lprobs >= cand_lprobs).all().item() == 1
else:
best_idx = cand_lprobs.argmax().item()
full_cand = sample['candidate_tokens'][0][best_idx]
mask = sample['candidate_masks'][0][best_idx]
toks = full_cand[mask.bool()]
return self.bpe.decode(self.source_dictionary.string(toks)).strip()
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
@register_task('winogrande')
class WinograndeTask(WSCTask):
"""
Task for WinoGrande dataset. Efficient implementation for Winograd schema
tasks with exactly two candidates, one of which is correct.
"""
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == 'winogrande', 'Must set --criterion=winogrande'
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(vocab)))
return cls(args, vocab)
def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + '.jsonl')
if not os.path.exists(data_path):
raise FileNotFoundError('Cannot find data: {}'.format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == 'test'))
for sample in itr:
sentence, pronoun_span, query, cand_text = sample
prefix = sentence[:pronoun_span[0]].rstrip()
suffix = sentence[pronoun_span[1]:]
leading_space = ' ' if sentence[:pronoun_span[0]].endswith(' ') else ''
trailing_space = ''
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query, prefix, suffix, leading_space, trailing_space,
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_mask = self.binarize_with_mask(
cand_text, prefix, suffix, leading_space, trailing_space,
)
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_mask)
candidate_lengths.append(cand_toks.size(0))
query_lengths = np.array(query_lengths)
def get_pad_dataset_fn(tokens, length, pad_idx):
return PadDataset(
ListDataset(tokens, length),
pad_idx=pad_idx,
left_pad=False,
)
query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad())
query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = get_pad_dataset_fn(candidate_tokens, candidate_lengths, self.vocab.pad())
candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0)
dataset = {
'id': IdDataset(),
'query_tokens': query_tokens,
'query_masks': query_masks,
'candidate_tokens': candidate_tokens,
'candidate_masks': candidate_masks,
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
| 13,148 | 33.970745 | 103 | py |
RegularizedBN | RegularizedBN-main/examples/roberta/wsc/wsc_criterion.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import encoders
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
@register_criterion('wsc')
class WSCCriterion(LegacyFairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
if self.args.save_predictions is not None:
self.prediction_h = open(self.args.save_predictions, 'w')
else:
self.prediction_h = None
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument('--wsc-margin-alpha', type=float, metavar='A', default=1.0)
parser.add_argument('--wsc-margin-beta', type=float, metavar='B', default=0.0)
parser.add_argument('--wsc-cross-entropy', action='store_true',
help='use cross entropy formulation instead of margin loss')
parser.add_argument('--save-predictions', metavar='FILE',
help='file to save predictions to')
def get_masked_input(self, tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask] = self.task.mask
return masked_tokens
def get_lprobs(self, model, tokens, mask):
logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
def get_loss(self, query_lprobs, cand_lprobs):
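        # Either a cross-entropy over [query, candidates] with the query as the target class,
        # or a margin loss that maximizes the query log-prob while penalizing candidates whose
        # log-prob comes within `wsc_margin_beta` of it (scaled by `wsc_margin_alpha`).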
if self.args.wsc_cross_entropy:
return F.cross_entropy(
torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
query_lprobs.new([0]).long(),
)
else:
return (
- query_lprobs
+ self.args.wsc_margin_alpha * (
cand_lprobs - query_lprobs + self.args.wsc_margin_beta
).clamp(min=0)
).sum()
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
loss, nloss = 0., 0
ncorrect, nqueries = 0, 0
for i, label in enumerate(sample['labels']):
query_lprobs = self.get_lprobs(
model,
sample['query_tokens'][i].unsqueeze(0),
sample['query_masks'][i].unsqueeze(0),
)
cand_lprobs = self.get_lprobs(
model,
sample['candidate_tokens'][i],
sample['candidate_masks'][i],
)
pred = (query_lprobs >= cand_lprobs).all().item()
if label is not None:
label = 1 if label else 0
ncorrect += 1 if pred == label else 0
nqueries += 1
if label:
# only compute a loss for positive instances
nloss += 1
loss += self.get_loss(query_lprobs, cand_lprobs)
id = sample['id'][i].item()
if self.prediction_h is not None:
print('{}\t{}\t{}'.format(id, pred, label), file=self.prediction_h)
if nloss == 0:
loss = torch.tensor(0.0, requires_grad=True)
sample_size = nqueries if nqueries > 0 else 1
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['nsentences'],
'sample_size': sample_size,
'ncorrect': ncorrect,
'nqueries': nqueries,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
agg_output = {
'loss': loss_sum / sample_size / math.log(2),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
ncorrect = sum(log.get('ncorrect', 0) for log in logging_outputs)
nqueries = sum(log.get('nqueries', 0) for log in logging_outputs)
if nqueries > 0:
agg_output['accuracy'] = ncorrect / float(nqueries)
return agg_output
@register_criterion('winogrande')
class WinograndeCriterion(WSCCriterion):
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
query_lprobs = self.get_lprobs(
model,
sample['query_tokens'],
sample['query_masks'],
)
cand_lprobs = self.get_lprobs(
model,
sample['candidate_tokens'],
sample['candidate_masks'],
)
pred = query_lprobs >= cand_lprobs
loss = self.get_loss(query_lprobs, cand_lprobs)
sample_size = sample['query_tokens'].size(0)
ncorrect = pred.sum().item()
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['nsentences'],
'sample_size': sample_size,
'ncorrect': ncorrect,
'nqueries': sample_size,
}
return loss, sample_size, logging_output
| 6,034 | 35.137725 | 88 | py |
RegularizedBN | RegularizedBN-main/examples/speech_recognition/infer.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import editdistance
import logging
import math
import os
import sys
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, utils, tasks
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.data.data_utils import post_process
logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def add_asr_eval_argument(parser):
parser.add_argument("--kspmodel", default=None, help="sentence piece model")
parser.add_argument(
"--wfstlm", default=None, help="wfstlm on dictonary output units"
)
parser.add_argument(
"--rnnt_decoding_type",
default="greedy",
help="wfstlm on dictonary\
output units",
)
parser.add_argument(
"--lm-weight",
"--lm_weight",
type=float,
default=0.2,
help="weight for lm while interpolating with neural score",
)
parser.add_argument(
"--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
)
parser.add_argument(
"--w2l-decoder", choices=["viterbi", "kenlm", "fairseqlm"], help="use a w2l decoder"
)
parser.add_argument("--lexicon", help="lexicon for w2l decoder")
parser.add_argument("--unit-lm", action='store_true', help="if using a unit lm")
parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
parser.add_argument("--beam-threshold", type=float, default=25.0)
parser.add_argument("--beam-size-token", type=float, default=100)
parser.add_argument("--word-score", type=float, default=1.0)
parser.add_argument("--unk-weight", type=float, default=-math.inf)
parser.add_argument("--sil-weight", type=float, default=0.0)
parser.add_argument(
"--dump-emissions",
type=str,
default=None,
help="if present, dumps emissions into this file and exits",
)
parser.add_argument(
"--dump-features",
type=str,
default=None,
help="if present, dumps features into this file and exits",
)
parser.add_argument(
"--load-emissions",
type=str,
default=None,
help="if present, loads emissions from this file",
)
return parser
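# Example decoding command (illustrative sketch; the task name, paths, subset and
# hyper-parameter values are placeholders, not documented defaults):
#   python examples/speech_recognition/infer.py /path/to/data --task audio_pretraining \
#       --path /path/to/checkpoint.pt --gen-subset dev_other --w2l-decoder kenlm \
#       --lexicon /path/to/lexicon.lst --kenlm-model /path/to/lm.bin \
#       --beam 500 --results-path /path/to/results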
def check_args(args):
# assert args.path is not None, "--path required for generation!"
# assert args.results_path is not None, "--results_path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
def get_dataset_itr(args, task, models):
return task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
def process_predictions(
args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, args.remove_bpe)
if res_files is not None:
print(
"{} ({}-{})".format(hyp_pieces, speaker, id), file=res_files["hypo.units"]
)
print("{} ({}-{})".format(hyp_words, speaker, id), file=res_files["hypo.words"])
tgt_pieces = tgt_dict.string(target_tokens)
tgt_words = post_process(tgt_pieces, args.remove_bpe)
if res_files is not None:
print("{} ({}-{})".format(tgt_pieces, speaker, id), file=res_files["ref.units"])
print("{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"])
# only score top hypothesis
if not args.quiet:
logger.debug("HYPO:" + hyp_words)
logger.debug("TARGET:" + tgt_words)
logger.debug("___________________")
hyp_words = hyp_words.split()
tgt_words = tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
def prepare_result_files(args):
def get_res_file(file_prefix):
if args.num_shards > 1:
file_prefix = f'{args.shard_id}_{file_prefix}'
path = os.path.join(
args.results_path,
"{}-{}-{}.txt".format(
file_prefix, os.path.basename(args.path), args.gen_subset
),
)
return open(path, "w", buffering=1)
if not args.results_path:
return None
return {
"hypo.words": get_res_file("hypo.word"),
"hypo.units": get_res_file("hypo.units"),
"ref.words": get_res_file("ref.word"),
"ref.units": get_res_file("ref.units"),
}
def load_models_and_criterions(filenames, data_path, arg_overrides=None, task=None, model_state=None):
models = []
criterions = []
if arg_overrides is None:
arg_overrides = {}
arg_overrides['wer_args'] = None
arg_overrides['data'] = data_path
if filenames is None:
assert model_state is not None
filenames = [0]
else:
filenames = filenames.split(":")
for filename in filenames:
if model_state is None:
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = checkpoint_utils.load_checkpoint_to_cpu(filename, arg_overrides)
else:
state = model_state
args = state["args"]
if task is None:
task = tasks.setup_task(args)
model = task.build_model(args)
model.load_state_dict(state["model"], strict=True)
models.append(model)
criterion = task.build_criterion(args)
if "criterion" in state:
criterion.load_state_dict(state["criterion"], strict=True)
criterions.append(criterion)
return models, criterions, args
def optimize_models(args, use_cuda, models):
"""Optimize ensemble for generation
"""
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
class ExistingEmissionsDecoder(object):
def __init__(self, decoder, emissions):
self.decoder = decoder
self.emissions = emissions
def generate(self, models, sample, **unused):
ids = sample["id"].cpu().numpy()
try:
emissions = np.stack(self.emissions[ids])
        except Exception:
print([x.shape for x in self.emissions[ids]])
raise Exception('invalid sizes')
emissions = torch.from_numpy(emissions)
return self.decoder.decode(emissions)
def main(args, task=None, model_state=None):
check_args(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 4000000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
if task is None:
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
logger.info(
"| {} {} {} examples".format(
args.data, args.gen_subset, len(task.dataset(args.gen_subset))
)
)
# Set dictionary
tgt_dict = task.target_dictionary
logger.info("| decoding with criterion {}".format(args.criterion))
# Load ensemble
if args.load_emissions:
models, criterions = [], []
else:
logger.info("| loading model(s) from {}".format(args.path))
models, criterions, _ = load_models_and_criterions(
args.path,
data_path=args.data,
arg_overrides=eval(args.model_overrides), # noqa
task=task,
model_state=model_state,
)
optimize_models(args, use_cuda, models)
# hack to pass transitions to W2lDecoder
if args.criterion == "asg_loss":
trans = criterions[0].asg.trans.data
args.asg_transitions = torch.flatten(trans).tolist()
# Load dataset (possibly sharded)
itr = get_dataset_itr(args, task, models)
# Initialize generator
gen_timer = StopwatchMeter()
def build_generator(args):
w2l_decoder = getattr(args, "w2l_decoder", None)
if w2l_decoder == "viterbi":
from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
return W2lViterbiDecoder(args, task.target_dictionary)
elif w2l_decoder == "kenlm":
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
return W2lKenLMDecoder(args, task.target_dictionary)
elif w2l_decoder == "fairseqlm":
from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
return W2lFairseqLMDecoder(args, task.target_dictionary)
else:
return super().build_generator(args)
generator = build_generator(args)
if args.load_emissions:
generator = ExistingEmissionsDecoder(
generator, np.load(args.load_emissions, allow_pickle=True)
)
logger.info("loaded emissions from " + args.load_emissions)
num_sentences = 0
if args.results_path is not None and not os.path.exists(args.results_path):
os.makedirs(args.results_path)
max_source_pos = (
utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
),
)
if max_source_pos is not None:
max_source_pos = max_source_pos[0]
if max_source_pos is not None:
max_source_pos = max_source_pos[0] - 1
if args.dump_emissions:
emissions = {}
if args.dump_features:
features = {}
models[0].bert.proj = None
else:
res_files = prepare_result_files(args)
errs_t = 0
lengths_t = 0
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample["target"][:, : args.prefix_size]
gen_timer.start()
if args.dump_emissions:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
emm = emm.transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
emissions[id.item()] = emm[i]
continue
elif args.dump_features:
with torch.no_grad():
encoder_out = models[0](**sample["net_input"])
feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
for i, id in enumerate(sample["id"]):
padding = encoder_out["encoder_padding_mask"][i].cpu().numpy() if encoder_out["encoder_padding_mask"] is not None else None
features[id.item()] = (feat[i], padding)
continue
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
speaker = None
# id = task.dataset(args.gen_subset).ids[int(sample_id)]
id = sample_id
toks = sample["target"][i, :] if 'target_label' not in sample else sample["target_label"][i, :]
target_tokens = (
utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
)
# Process top predictions
errs, length = process_predictions(
args, hypos[i], None, tgt_dict, target_tokens, res_files, speaker, id
)
errs_t += errs
lengths_t += length
wps_meter.update(num_generated_tokens)
t.log({"wps": round(wps_meter.avg)})
num_sentences += sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
wer = None
if args.dump_emissions:
emm_arr = []
for i in range(len(emissions)):
emm_arr.append(emissions[i])
np.save(args.dump_emissions, emm_arr)
logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
elif args.dump_features:
feat_arr = []
for i in range(len(features)):
feat_arr.append(features[i])
np.save(args.dump_features, feat_arr)
logger.info(f"saved {len(features)} emissions to {args.dump_features}")
else:
if lengths_t > 0:
wer = errs_t * 100.0 / lengths_t
logger.info(f"WER: {wer}")
logger.info(
"| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
"sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
return task, wer
def make_parser():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
return parser
def cli_main():
parser = make_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| 14,668 | 33.193473 | 147 | py |
RegularizedBN | RegularizedBN-main/examples/speech_recognition/w2l_decoder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wav2letter decoders.
"""
from collections import namedtuple, deque
import gc
import itertools as it
import numpy as np
import torch
import os.path as osp
import warnings
from fairseq import tasks
from fairseq.utils import apply_to_sample
from examples.speech_recognition.data.replabels import unpack_replabels
try:
from wav2letter.common import create_word_dict, load_words
from wav2letter.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from wav2letter.decoder import (
CriterionType,
DecoderOptions,
KenLM,
LM,
LMState,
SmearingMode,
Trie,
LexiconDecoder,
LexiconFreeDecoder,
)
except:
warnings.warn(
"wav2letter python bindings are required to use this functionality. Please install from https://github.com/facebookresearch/wav2letter/wiki/Python-bindings"
)
LM = object
LMState = object
class W2lDecoder(object):
def __init__(self, args, tgt_dict):
self.tgt_dict = tgt_dict
self.vocab_size = len(tgt_dict)
self.nbest = args.nbest
# criterion-specific init
if args.criterion == "ctc":
self.criterion_type = CriterionType.CTC
self.blank = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.asg_transitions = None
elif args.criterion == "asg_loss":
self.criterion_type = CriterionType.ASG
self.blank = -1
self.asg_transitions = args.asg_transitions
self.max_replabel = args.max_replabel
assert len(self.asg_transitions) == self.vocab_size ** 2
else:
raise RuntimeError(f"unknown criterion: {args.criterion}")
def generate(self, models, sample, **unused):
"""Generate a batch of inferences."""
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
}
emissions = self.get_emissions(models, encoder_input)
return self.decode(emissions)
def get_emissions(self, models, encoder_input):
"""Run encoder and normalize emissions"""
# encoder_out = models[0].encoder(**encoder_input)
encoder_out = models[0](**encoder_input)
if self.criterion_type == CriterionType.CTC:
emissions = models[0].get_normalized_probs(encoder_out, log_probs=True)
elif self.criterion_type == CriterionType.ASG:
emissions = encoder_out["encoder_out"]
return emissions.transpose(0, 1).float().cpu().contiguous()
def get_tokens(self, idxs):
"""Normalize tokens by handling CTC blank, ASG replabels, etc."""
idxs = (g[0] for g in it.groupby(idxs))
if self.criterion_type == CriterionType.CTC:
idxs = filter(lambda x: x != self.blank, idxs)
elif self.criterion_type == CriterionType.ASG:
idxs = filter(lambda x: x >= 0, idxs)
idxs = unpack_replabels(list(idxs), self.tgt_dict, self.max_replabel)
return torch.LongTensor(list(idxs))
class W2lViterbiDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
if self.asg_transitions is None:
transitions = torch.FloatTensor(N, N).zero_()
else:
transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
viterbi_path = torch.IntTensor(B, T)
workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
CpuViterbiPath.compute(
B,
T,
N,
get_data_ptr_as_bytes(emissions),
get_data_ptr_as_bytes(transitions),
get_data_ptr_as_bytes(viterbi_path),
get_data_ptr_as_bytes(workspace),
)
return [
[{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
for b in range(B)
]
class W2lKenLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.silence = (
tgt_dict.index("<ctc_blank>")
if "<ctc_blank>" in tgt_dict.indices
else tgt_dict.bos()
)
self.lexicon = load_words(args.lexicon)
self.word_dict = create_word_dict(self.lexicon)
self.unk_word = self.word_dict.get_index("<unk>")
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.trie = Trie(self.vocab_size, self.silence)
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
word_idx = self.word_dict.get_index(word)
_, score = self.lm.score(start_state, word_idx)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = DecoderOptions(
args.beam,
int(getattr(args, "beam_size_token", len(tgt_dict))),
args.beam_threshold,
args.lm_weight,
args.word_score,
args.unk_weight,
args.sil_weight,
0,
False,
self.criterion_type,
)
if self.asg_transitions is None:
N = 768
# self.asg_transitions = torch.FloatTensor(N, N).zero_()
self.asg_transitions = []
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asg_transitions,
False,
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append(
[
{
"tokens": self.get_tokens(result.tokens),
"score": result.score,
"words": [
self.word_dict.get_entry(x) for x in result.words if x >= 0
],
}
for result in nbest_results
]
)
return hypos
FairseqLMState = namedtuple("FairseqLMState", ["prefix", "incremental_state", "probs"])
class FairseqLM(LM):
def __init__(self, dictionary, model):
LM.__init__(self)
self.dictionary = dictionary
self.model = model
self.unk = self.dictionary.unk()
self.save_incremental = False # this currently does not work properly
self.max_cache = 20_000
model.cuda()
model.eval()
model.make_generation_fast_()
self.states = {}
self.stateq = deque()
def start(self, start_with_nothing):
state = LMState()
prefix = torch.LongTensor([[self.dictionary.eos()]])
incremental_state = {} if self.save_incremental else None
with torch.no_grad():
res = self.model(prefix.cuda(), incremental_state=incremental_state)
probs = self.model.get_normalized_probs(res, log_probs=True, sample=None)
if incremental_state is not None:
incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state)
self.states[state] = FairseqLMState(
prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy()
)
self.stateq.append(state)
return state
def score(self, state: LMState, token_index: int, no_cache: bool = False):
"""
Evaluate language model based on the current lm state and new word
Parameters:
-----------
state: current lm state
token_index: index of the word
                (this may be a lexicon index, in which case the LM must store the mapping
                between lexicon and LM indices, or a direct LM index of a word)
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
curr_state = self.states[state]
def trim_cache(targ_size):
while len(self.stateq) > targ_size:
rem_k = self.stateq.popleft()
rem_st = self.states[rem_k]
rem_st = FairseqLMState(rem_st.prefix, None, None)
self.states[rem_k] = rem_st
if curr_state.probs is None:
new_incremental_state = (
curr_state.incremental_state.copy()
if curr_state.incremental_state is not None
else None
)
with torch.no_grad():
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cuda(), new_incremental_state
)
elif self.save_incremental:
new_incremental_state = {}
res = self.model(
torch.from_numpy(curr_state.prefix).cuda(),
incremental_state=new_incremental_state,
)
probs = self.model.get_normalized_probs(
res, log_probs=True, sample=None
)
if new_incremental_state is not None:
new_incremental_state = apply_to_sample(
lambda x: x.cpu(), new_incremental_state
)
curr_state = FairseqLMState(
curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy()
)
if not no_cache:
self.states[state] = curr_state
self.stateq.append(state)
score = curr_state.probs[token_index].item()
trim_cache(self.max_cache)
outstate = state.child(token_index)
if outstate not in self.states and not no_cache:
prefix = np.concatenate(
[curr_state.prefix, torch.LongTensor([[token_index]])], -1
)
incr_state = curr_state.incremental_state
self.states[outstate] = FairseqLMState(prefix, incr_state, None)
if token_index == self.unk:
score = float("-inf")
return outstate, score
def finish(self, state: LMState):
"""
Evaluate eos for language model based on the current lm state
Returns:
--------
(LMState, float): pair of (new state, score for the current word)
"""
return self.score(state, self.dictionary.eos())
def empty_cache(self):
self.states = {}
self.stateq = deque()
gc.collect()
class W2lFairseqLMDecoder(W2lDecoder):
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.silence = tgt_dict.bos()
self.unit_lm = getattr(args, "unit_lm", False)
self.lexicon = load_words(args.lexicon) if args.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(args.kenlm_model, map_location="cpu")
lm_args = checkpoint["args"]
lm_args.data = osp.dirname(args.kenlm_model)
print(lm_args)
task = tasks.setup_task(lm_args)
model = task.build_model(lm_args)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
self.decoder_opts = DecoderOptions(
args.beam,
int(getattr(args, "beam_size_token", len(tgt_dict))),
args.beam_threshold,
args.lm_weight,
args.word_score,
args.unk_weight,
args.sil_weight,
0,
False,
self.criterion_type,
)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unit_lm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert (
tgt_dict.unk() not in spelling_idxs
), f"{spelling} {spelling_idxs}"
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unit_lm,
)
else:
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
def decode(self, emissions):
B, T, N = emissions.size()
hypos = []
def idx_to_word(idx):
if self.unit_lm:
return self.idx_to_wrd[idx]
else:
return self.word_dict[idx]
def make_hypo(result):
hypo = {"tokens": self.get_tokens(result.tokens), "score": result.score}
if self.lexicon:
hypo["words"] = [idx_to_word(x) for x in result.words if x >= 0]
return hypo
for b in range(B):
emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
results = self.decoder.decode(emissions_ptr, T, N)
nbest_results = results[: self.nbest]
hypos.append([make_hypo(result) for result in nbest_results])
self.lm.empty_cache()
return hypos
| 14,872 | 33.269585 | 164 | py |
RegularizedBN | RegularizedBN-main/examples/speech_recognition/criterions/cross_entropy_acc.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def compute_loss(self, model, net_output, target, reduction, log_probs):
# N, T -> N * T
target = target.view(-1)
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the net output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
lprobs = lprobs.transpose(0, 1)
# N, T, D -> N * T, D
lprobs = lprobs.view(-1, lprobs.size(-1))
loss = F.nll_loss(
lprobs, target, ignore_index=self.padding_idx, reduction=reduction
)
return lprobs, loss
def get_logging_output(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output
def forward(self, model, sample, reduction="sum", log_probs=True):
"""Computes the cross entropy with accuracy metric for the given sample.
This is similar to CrossEntropyCriterion in fairseq, but also
computes accuracy metrics as part of logging
Args:
logprobs (Torch.tensor) of shape N, T, D i.e.
batchsize, timesteps, dimensions
targets (Torch.tensor) of shape N, T i.e batchsize, timesteps
Returns:
tuple: With three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
TODO:
            * Currently this Criterion will only work with LSTMEncoderModels or
              FairseqModels that have a decoder, or models that return a
              torch.Tensor as net_output.
We need to make a change to support all FairseqEncoder models.
"""
net_output = model(**sample["net_input"])
target = model.get_targets(sample, net_output)
lprobs, loss = self.compute_loss(
model, net_output, target, reduction, log_probs
)
sample_size, logging_output = self.get_logging_output(
sample, target, lprobs, loss
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, then loss
# is per-sentence loss; else sample_size is ntokens, the loss
# becomes per-output token loss
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
# total is the number of validate tokens
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
            # loss: per-sentence loss (sample_size is nsentences in this branch)
            # nll_loss: per output token loss
return agg_output
| 5,372 | 40.015267 | 85 | py |
RegularizedBN | RegularizedBN-main/examples/speech_recognition/criterions/ASG_loss.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from examples.speech_recognition.data.replabels import pack_replabels
@register_criterion("asg_loss")
class ASGCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
group = parser.add_argument_group("ASG Loss")
group.add_argument(
"--asg-transitions-init",
help="initial diagonal value of transition matrix",
type=float,
default=0.0,
)
group.add_argument(
"--max-replabel", help="maximum # of replabels", type=int, default=2
)
group.add_argument(
"--linseg-updates",
help="# of training updates to use LinSeg initialization",
type=int,
default=0,
)
group.add_argument(
"--hide-linseg-messages",
help="hide messages about LinSeg initialization",
action="store_true",
)
def __init__(
self,
task,
silence_token,
asg_transitions_init,
max_replabel,
linseg_updates,
hide_linseg_messages,
):
from wav2letter.criterion import ASGLoss, CriterionScaleMode
super().__init__(task)
self.tgt_dict = task.target_dictionary
self.eos = self.tgt_dict.eos()
self.silence = (
self.tgt_dict.index(silence_token)
if silence_token in self.tgt_dict
else None
)
self.max_replabel = max_replabel
num_labels = len(self.tgt_dict)
self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)
self.asg.trans = torch.nn.Parameter(
asg_transitions_init * torch.eye(num_labels), requires_grad=True
)
self.linseg_progress = torch.nn.Parameter(
torch.tensor([0], dtype=torch.int), requires_grad=False
)
self.linseg_maximum = linseg_updates
self.linseg_message_state = "none" if hide_linseg_messages else "start"
@classmethod
def build_criterion(cls, args, task):
return cls(
task,
args.silence_token,
args.asg_transitions_init,
args.max_replabel,
args.linseg_updates,
args.hide_linseg_messages,
)
def linseg_step(self):
if not self.training:
return False
if self.linseg_progress.item() < self.linseg_maximum:
if self.linseg_message_state == "start":
print("| using LinSeg to initialize ASG")
self.linseg_message_state = "finish"
self.linseg_progress.add_(1)
return True
elif self.linseg_message_state == "finish":
print("| finished LinSeg initialization")
self.linseg_message_state = "none"
return False
def replace_eos_with_silence(self, tgt):
if tgt[-1] != self.eos:
return tgt
elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):
return tgt[:-1]
else:
return tgt[:-1] + [self.silence]
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
B = emissions.size(0)
T = emissions.size(1)
device = emissions.device
target = torch.IntTensor(B, T)
target_size = torch.IntTensor(B)
using_linseg = self.linseg_step()
for b in range(B):
initial_target_size = sample["target_lengths"][b].item()
if initial_target_size == 0:
raise ValueError("target size cannot be zero")
tgt = sample["target"][b, :initial_target_size].tolist()
tgt = self.replace_eos_with_silence(tgt)
tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
tgt = tgt[:T]
if using_linseg:
tgt = [tgt[t * len(tgt) // T] for t in range(T)]
target[b][: len(tgt)] = torch.IntTensor(tgt)
target_size[b] = len(tgt)
loss = self.asg.forward(emissions, target.to(device), target_size.to(device))
if reduce:
loss = torch.sum(loss)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / nsentences,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return agg_output
| 5,857 | 33.25731 | 85 | py |
RegularizedBN | RegularizedBN-main/examples/speech_recognition/models/vggtransformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import LinearizedConvolution
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock
@register_model("asr_vggtransformer")
class VGGTransformerModel(FairseqEncoderDecoderModel):
"""
Transformers with convolutional context for ASR
https://arxiv.org/abs/1904.11660
"""
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock:
[(out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
use_layer_norm), ...])
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help=""""
a tuple containing the configuration of the encoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]')
""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="""
encoder output dimension, can be None. If specified, projecting the
transformer output to the specified dimension""",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--tgt-embed-dim",
type=int,
metavar="N",
help="embedding dimension of the decoder target tokens",
)
parser.add_argument(
"--transformer-dec-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the decoder transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ...]
""",
)
parser.add_argument(
"--conv-dec-config",
type=str,
metavar="EXPR",
help="""
an array of tuples for the decoder 1-D convolution config
[(out_channels, conv_kernel_size, use_layer_norm), ...]""",
)
@classmethod
def build_encoder(cls, args, task):
return VGGTransformerEncoder(
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
)
@classmethod
def build_decoder(cls, args, task):
return TransformerDecoder(
dictionary=task.target_dictionary,
embed_dim=args.tgt_embed_dim,
transformer_config=eval(args.transformer_dec_config),
conv_config=eval(args.conv_dec_config),
encoder_output_dim=args.enc_output_dim,
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure that all args are properly defaulted
# (in case there are any new ones)
base_architecture(args)
encoder = cls.build_encoder(args, task)
decoder = cls.build_decoder(args, task)
return cls(encoder, decoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2
DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2
# 256: embedding dimension
# 4: number of heads
# 1024: FFN
# True: apply LayerNorm before (dropout + residual) instead of after
# 0.2 (dropout): dropout after MultiheadAttention and second FC
# 0.2 (attention_dropout): dropout in MultiheadAttention
# 0.2 (relu_dropout): dropout after ReLU
DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2
DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2
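# Illustrative reading of one transformer config tuple (a documentation sketch,
# not used by the code): the values map positionally onto the fields described
# in the comments above.
#
#   (input_dim, num_heads, ffn_dim, normalize_before,
#    dropout, attention_dropout, relu_dropout) = DEFAULT_ENC_TRANSFORMER_CONFIG[0]
#   # -> 256, 4, 1024, True, 0.2, 0.2, 0.2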
# TODO: replace transformer encoder config from a one-liner
# to explicit args to get rid of this transformation
def prepare_transformer_encoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.encoder_embed_dim = input_dim
args.encoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.encoder_normalize_before = normalize_before
args.encoder_ffn_embed_dim = ffn_dim
return args
def prepare_transformer_decoder_params(
input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout,
):
args = argparse.Namespace()
args.decoder_embed_dim = input_dim
args.decoder_attention_heads = num_heads
args.attention_dropout = attention_dropout
args.dropout = dropout
args.activation_dropout = relu_dropout
args.decoder_normalize_before = normalize_before
args.decoder_ffn_embed_dim = ffn_dim
return args
class VGGTransformerEncoder(FairseqEncoder):
"""VGG + Transformer encoder"""
def __init__(
self,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
"""constructor for VGGTransformerEncoder
Args:
- input_feat_per_channel: feature dim (not including stacked,
just base feature)
- in_channel: # input channels (e.g., if stack 8 feature vector
together, this is 8)
- vggblock_config: configuration of vggblock, see comments on
DEFAULT_ENC_VGGBLOCK_CONFIG
- transformer_config: configuration of transformer layer, see comments
on DEFAULT_ENC_TRANSFORMER_CONFIG
- encoder_output_dim: final transformer output embedding dimension
- transformer_context: (left, right) if set, self-attention will be focused
on (t-left, t+right)
- transformer_sampling: an iterable of int, must match with
len(transformer_config), transformer_sampling[i] indicates sampling
          factor for i-th transformer layer, after multihead att and feedforward
part
"""
super().__init__(None)
self.num_vggblocks = 0
if vggblock_config is not None:
if not isinstance(vggblock_config, Iterable):
raise ValueError("vggblock_config is not iterable")
self.num_vggblocks = len(vggblock_config)
self.conv_layers = nn.ModuleList()
self.in_channels = in_channels
self.input_dim = input_feat_per_channel
if vggblock_config is not None:
for _, config in enumerate(vggblock_config):
(
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
layer_norm,
) = config
self.conv_layers.append(
VGGBlock(
in_channels,
out_channels,
conv_kernel_size,
pooling_kernel_size,
num_conv_layers,
input_dim=input_feat_per_channel,
layer_norm=layer_norm,
)
)
in_channels = out_channels
input_feat_per_channel = self.conv_layers[-1].output_dim
transformer_input_dim = self.infer_conv_output_dim(
self.in_channels, self.input_dim
)
# transformer_input_dim is the output dimension of VGG part
self.validate_transformer_config(transformer_config)
self.transformer_context = self.parse_transformer_context(transformer_context)
self.transformer_sampling = self.parse_transformer_sampling(
transformer_sampling, len(transformer_config)
)
self.transformer_layers = nn.ModuleList()
if transformer_input_dim != transformer_config[0][0]:
self.transformer_layers.append(
Linear(transformer_input_dim, transformer_config[0][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[0])
)
)
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.transformer_layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.transformer_layers.append(
TransformerEncoderLayer(
prepare_transformer_encoder_params(*transformer_config[i])
)
)
self.encoder_output_dim = encoder_output_dim
self.transformer_layers.extend(
[
Linear(transformer_config[-1][0], encoder_output_dim),
LayerNorm(encoder_output_dim),
]
)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
bsz, max_seq_len, _ = src_tokens.size()
x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
x = x.transpose(1, 2).contiguous()
# (B, C, T, feat)
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
bsz, _, output_seq_len, _ = x.size()
# (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat)
x = x.transpose(1, 2).transpose(0, 1)
x = x.contiguous().view(output_seq_len, bsz, -1)
subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
        # TODO: shouldn't subsampling_factor be determined in advance?
input_lengths = (src_lengths.float() / subsampling_factor).ceil().long()
encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
input_lengths, batch_first=True
)
if not encoder_padding_mask.any():
encoder_padding_mask = None
attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor)
transformer_layer_idx = 0
for layer_idx in range(len(self.transformer_layers)):
if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer):
x = self.transformer_layers[layer_idx](
x, encoder_padding_mask, attn_mask
)
if self.transformer_sampling[transformer_layer_idx] != 1:
sampling_factor = self.transformer_sampling[transformer_layer_idx]
x, encoder_padding_mask, attn_mask = self.slice(
x, encoder_padding_mask, attn_mask, sampling_factor
)
transformer_layer_idx += 1
else:
x = self.transformer_layers[layer_idx](x)
        # encoder_padding_mask is a (T x B) tensor, its [t, b] elements indicate
# whether encoder_output[t, b] is valid or not (valid=0, invalid=1)
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": encoder_padding_mask.t()
if encoder_padding_mask is not None
else None,
# (B, T) --> (T, B)
}
def infer_conv_output_dim(self, in_channels, input_dim):
sample_seq_len = 200
sample_bsz = 10
x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
for i, _ in enumerate(self.conv_layers):
x = self.conv_layers[i](x)
x = x.transpose(1, 2)
mb, seq = x.size()[:2]
return x.contiguous().view(mb, seq, -1).size(-1)
def validate_transformer_config(self, transformer_config):
for config in transformer_config:
input_dim, num_heads = config[:2]
if input_dim % num_heads != 0:
msg = (
"ERROR in transformer config {}:".format(config)
+ "input dimension {} ".format(input_dim)
+ "not dividable by number of heads".format(num_heads)
)
raise ValueError(msg)
def parse_transformer_context(self, transformer_context):
"""
transformer_context can be the following:
- None; indicates no context is used, i.e.,
transformer can access full context
- a tuple/list of two int; indicates left and right context,
any number <0 indicates infinite context
* e.g., (5, 6) indicates that for query at x_t, transformer can
access [t-5, t+6] (inclusive)
* e.g., (-1, 6) indicates that for query at x_t, transformer can
access [0, t+6] (inclusive)
"""
if transformer_context is None:
return None
if not isinstance(transformer_context, Iterable):
raise ValueError("transformer context must be Iterable if it is not None")
if len(transformer_context) != 2:
raise ValueError("transformer context must have length 2")
left_context = transformer_context[0]
if left_context < 0:
left_context = None
right_context = transformer_context[1]
if right_context < 0:
right_context = None
if left_context is None and right_context is None:
return None
return (left_context, right_context)
def parse_transformer_sampling(self, transformer_sampling, num_layers):
"""
parsing transformer sampling configuration
Args:
- transformer_sampling, accepted input:
* None, indicating no sampling
* an Iterable with int (>0) as element
- num_layers, expected number of transformer layers, must match with
the length of transformer_sampling if it is not None
Returns:
- A tuple with length num_layers
"""
if transformer_sampling is None:
return (1,) * num_layers
if not isinstance(transformer_sampling, Iterable):
raise ValueError(
"transformer_sampling must be an iterable if it is not None"
)
if len(transformer_sampling) != num_layers:
raise ValueError(
"transformer_sampling {} does not match with the number "
+ "of layers {}".format(transformer_sampling, num_layers)
)
for layer, value in enumerate(transformer_sampling):
if not isinstance(value, int):
raise ValueError("Invalid value in transformer_sampling: ")
if value < 1:
raise ValueError(
"{} layer's subsampling is {}.".format(layer, value)
+ " This is not allowed! "
)
return transformer_sampling
def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
"""
embedding is a (T, B, D) tensor
padding_mask is a (B, T) tensor or None
attn_mask is a (T, T) tensor or None
"""
embedding = embedding[::sampling_factor, :, :]
if padding_mask is not None:
padding_mask = padding_mask[:, ::sampling_factor]
if attn_mask is not None:
attn_mask = attn_mask[::sampling_factor, ::sampling_factor]
return embedding, padding_mask, attn_mask
def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
"""
create attention mask according to sequence lengths and transformer
context
Args:
- input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is
the length of b-th sequence
- subsampling_factor: int
* Note that the left_context and right_context is specified in
the input frame-level while input to transformer may already
go through subsampling (e.g., the use of striding in vggblock)
we use subsampling_factor to scale the left/right context
Return:
- a (T, T) binary tensor or None, where T is max(input_lengths)
* if self.transformer_context is None, None
* if left_context is None,
* attn_mask[t, t + right_context + 1:] = 1
* others = 0
* if right_context is None,
* attn_mask[t, 0:t - left_context] = 1
* others = 0
            * otherwise
* attn_mask[t, t - left_context: t + right_context + 1] = 0
* others = 1
"""
if self.transformer_context is None:
return None
maxT = torch.max(input_lengths).item()
attn_mask = torch.zeros(maxT, maxT)
left_context = self.transformer_context[0]
right_context = self.transformer_context[1]
if left_context is not None:
left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
if right_context is not None:
right_context = math.ceil(self.transformer_context[1] / subsampling_factor)
for t in range(maxT):
if left_context is not None:
st = 0
en = max(st, t - left_context)
attn_mask[t, st:en] = 1
if right_context is not None:
st = t + right_context + 1
st = min(st, maxT - 1)
attn_mask[t, st:] = 1
return attn_mask.to(input_lengths.device)
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
class TransformerDecoder(FairseqIncrementalDecoder):
"""
    Transformer decoder consisting of len(transformer_config) layers. Each layer
    is a :class:`TransformerDecoderLayer`.
    Args:
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_dim (int): dimension of the target token embeddings
        transformer_config: an iterable of transformer layer configuration
            tuples, see DEFAULT_ENC_TRANSFORMER_CONFIG
        conv_config: an iterable of decoder 1-D convolution configuration
            tuples, see DEFAULT_DEC_CONV_CONFIG
        encoder_output_dim (int): dimension of the encoder output
"""
def __init__(
self,
dictionary,
embed_dim=512,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
conv_config=DEFAULT_DEC_CONV_CONFIG,
encoder_output_dim=512,
):
super().__init__(dictionary)
vocab_size = len(dictionary)
self.padding_idx = dictionary.pad()
self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx)
self.conv_layers = nn.ModuleList()
for i in range(len(conv_config)):
out_channels, kernel_size, layer_norm = conv_config[i]
if i == 0:
conv_layer = LinearizedConv1d(
embed_dim, out_channels, kernel_size, padding=kernel_size - 1
)
else:
conv_layer = LinearizedConv1d(
conv_config[i - 1][0],
out_channels,
kernel_size,
padding=kernel_size - 1,
)
self.conv_layers.append(conv_layer)
if layer_norm:
self.conv_layers.append(nn.LayerNorm(out_channels))
self.conv_layers.append(nn.ReLU())
self.layers = nn.ModuleList()
if conv_config[-1][0] != transformer_config[0][0]:
self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0]))
self.layers.append(TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[0])
))
for i in range(1, len(transformer_config)):
if transformer_config[i - 1][0] != transformer_config[i][0]:
self.layers.append(
Linear(transformer_config[i - 1][0], transformer_config[i][0])
)
self.layers.append(TransformerDecoderLayer(
prepare_transformer_decoder_params(*transformer_config[i])
))
self.fc_out = Linear(transformer_config[-1][0], vocab_size)
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the last decoder layer's output of shape `(batch, tgt_len,
vocab)`
- the last decoder layer's attention weights of shape `(batch,
tgt_len, src_len)`
"""
target_padding_mask = (
(prev_output_tokens == self.padding_idx).to(prev_output_tokens.device)
if incremental_state is None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
# embed tokens
x = self.embed_tokens(prev_output_tokens)
# B x T x C -> T x B x C
x = self._transpose_if_training(x, incremental_state)
for layer in self.conv_layers:
if isinstance(layer, LinearizedConvolution):
x = layer(x, incremental_state)
else:
x = layer(x)
# B x T x C -> T x B x C
x = self._transpose_if_inference(x, incremental_state)
# decoder layers
for layer in self.layers:
if isinstance(layer, TransformerDecoderLayer):
x, *_ = layer(
x,
(encoder_out["encoder_out"] if encoder_out is not None else None),
(
encoder_out["encoder_padding_mask"].t()
if encoder_out["encoder_padding_mask"] is not None
else None
),
incremental_state,
self_attn_mask=(
self.buffered_future_mask(x)
if incremental_state is None
else None
),
self_attn_padding_mask=(
target_padding_mask if incremental_state is None else None
),
)
else:
x = layer(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
x = self.fc_out(x)
return x, None
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
if self._future_mask.size(0) < dim:
self._future_mask = torch.triu(
utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def _transpose_if_training(self, x, incremental_state):
if incremental_state is None:
x = x.transpose(0, 1)
return x
def _transpose_if_inference(self, x, incremental_state):
if incremental_state:
x = x.transpose(0, 1)
return x
@register_model("asr_vggtransformer_encoder")
class VGGTransformerEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--vggblock-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one vggblock
[(out_channels, conv_kernel_size, pooling_kernel_size,num_conv_layers), ...]
""",
)
parser.add_argument(
"--transformer-enc-config",
type=str,
metavar="EXPR",
help="""
a tuple containing the configuration of the Transformer layers
configurations:
[(input_dim,
num_heads,
ffn_dim,
normalize_before,
dropout,
attention_dropout,
relu_dropout), ]""",
)
parser.add_argument(
"--enc-output-dim",
type=int,
metavar="N",
help="encoder output dimension, projecting the LSTM output",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--transformer-context",
type=str,
metavar="EXPR",
help="""
either None or a tuple of two ints, indicating left/right context a
transformer can have access to""",
)
parser.add_argument(
"--transformer-sampling",
type=str,
metavar="EXPR",
help="""
either None or a tuple of ints, indicating sampling factor in each layer""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
base_architecture_enconly(args)
encoder = VGGTransformerEncoderOnly(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
vggblock_config=eval(args.vggblock_enc_config),
transformer_config=eval(args.transformer_enc_config),
encoder_output_dim=args.enc_output_dim,
in_channels=args.in_channels,
transformer_context=eval(args.transformer_context),
transformer_sampling=eval(args.transformer_sampling),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
# net_output['encoder_out'] is a (T, B, D) tensor
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
# lprobs is a (T, B, D) tensor
        # we need to transpose to get (B, T, D) tensor
lprobs = lprobs.transpose(0, 1).contiguous()
lprobs.batch_first = True
return lprobs
class VGGTransformerEncoderOnly(VGGTransformerEncoder):
def __init__(
self,
vocab_size,
input_feat_per_channel,
vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
encoder_output_dim=512,
in_channels=1,
transformer_context=None,
transformer_sampling=None,
):
super().__init__(
input_feat_per_channel=input_feat_per_channel,
vggblock_config=vggblock_config,
transformer_config=transformer_config,
encoder_output_dim=encoder_output_dim,
in_channels=in_channels,
transformer_context=transformer_context,
transformer_sampling=transformer_sampling,
)
self.fc_out = Linear(self.encoder_output_dim, vocab_size)
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
enc_out = super().forward(src_tokens, src_lengths)
x = self.fc_out(enc_out["encoder_out"])
# x = F.log_softmax(x, dim=-1)
# Note: no need this line, because model.get_normalized_prob will call
# log_softmax
return {
"encoder_out": x, # (T, B, C)
"encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B)
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
# nn.init.uniform_(m.weight, -0.1, 0.1)
# nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True, dropout=0):
"""Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features, bias=bias)
# m.weight.data.uniform_(-0.1, 0.1)
# if bias:
# m.bias.data.uniform_(-0.1, 0.1)
return m
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
"""Weight-normalized Conv1d layer optimized for decoding"""
m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
nn.init.normal_(m.weight, mean=0, std=std)
nn.init.constant_(m.bias, 0)
return nn.utils.weight_norm(m, dim=2)
def LayerNorm(embedding_dim):
m = nn.LayerNorm(embedding_dim)
return m
# seq2seq models
def base_architecture(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.transformer_dec_config = getattr(
args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG
)
args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG)
args.transformer_context = getattr(args, "transformer_context", "None")
@register_model_architecture("asr_vggtransformer", "vggtransformer_1")
def vggtransformer_1(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_2")
def vggtransformer_2(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args,
"transformer_dec_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6",
)
@register_model_architecture("asr_vggtransformer", "vggtransformer_base")
def vggtransformer_base(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512)
args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4")
args.transformer_dec_config = getattr(
args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6"
)
# Size estimations:
# Encoder:
# - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3*3 = 258K
# Transformer:
# - input dimension adapter: 2560 x 512 -> 1.31M
# - transformer_layers (x12) --> 37.74M
# * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
# * FFN weight: 512*2048*2 = 2.097M
# - output dimension adapter: 512 x 512 -> 0.26 M
# Decoder:
# - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
# - transformer_layer: (x6) --> 25.16M
# * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
# * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
# * FFN: 512*2048*2 = 2.097M
# Final FC:
# - FC: 512*5000 = 256K (assuming vocab size 5K)
# In total:
# ~65 M
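# Sketch added for illustration (not referenced anywhere in this file): it
# reproduces the per-layer parameter arithmetic from the comment above for
# vggtransformer_base (embedding dim 512, FFN dim 2048); biases and layer norms
# are ignored.
def _approx_transformer_encoder_layer_params(embed_dim=512, ffn_dim=2048):
    attn = embed_dim * embed_dim * 3 + embed_dim * embed_dim  # in_proj + out_proj
    ffn = embed_dim * ffn_dim * 2  # two FFN weight matrices
    return attn + ffn  # ~3.15M per layer, ~37.7M for 12 encoder layers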
# CTC models
def base_architecture_enconly(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2"
)
args.transformer_enc_config = getattr(
args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2"
)
args.enc_output_dim = getattr(args, "enc_output_dim", 512)
args.in_channels = getattr(args, "in_channels", 1)
args.transformer_context = getattr(args, "transformer_context", "None")
args.transformer_sampling = getattr(args, "transformer_sampling", "None")
@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1")
def vggtransformer_enc_1(args):
# vggtransformer_1 is the same as vggtransformer_enc_big, except the number
# of layers is increased to 16
    # keep it here for backward compatibility purposes
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.vggblock_enc_config = getattr(
args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
)
args.transformer_enc_config = getattr(
args,
"transformer_enc_config",
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
)
args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
| 37,043 | 35.786495 | 88 | py |
RegularizedBN | RegularizedBN-main/examples/speech_recognition/models/w2l_conv_glu_enc.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules.fairseq_dropout import FairseqDropout
default_conv_enc_config = """[
(400, 13, 170, 0.2),
(440, 14, 0, 0.214),
(484, 15, 0, 0.22898),
(532, 16, 0, 0.2450086),
(584, 17, 0, 0.262159202),
(642, 18, 0, 0.28051034614),
(706, 19, 0, 0.30014607037),
(776, 20, 0, 0.321156295296),
(852, 21, 0, 0.343637235966),
(936, 22, 0, 0.367691842484),
(1028, 23, 0, 0.393430271458),
(1130, 24, 0, 0.42097039046),
(1242, 25, 0, 0.450438317792),
(1366, 26, 0, 0.481969000038),
(1502, 27, 0, 0.51570683004),
(1652, 28, 0, 0.551806308143),
(1816, 29, 0, 0.590432749713),
]"""
@register_model("asr_w2l_conv_glu_encoder")
class W2lConvGluEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--input-feat-per-channel",
type=int,
metavar="N",
help="encoder input dimension per input channel",
)
parser.add_argument(
"--in-channels",
type=int,
metavar="N",
help="number of encoder input channels",
)
parser.add_argument(
"--conv-enc-config",
type=str,
metavar="EXPR",
help="""
an array of tuples each containing the configuration of one conv layer
[(out_channels, kernel_size, padding, dropout), ...]
""",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
encoder = W2lConvGluEncoder(
vocab_size=len(task.target_dictionary),
input_feat_per_channel=args.input_feat_per_channel,
in_channels=args.in_channels,
conv_enc_config=eval(conv_enc_config),
)
return cls(encoder)
def get_normalized_probs(self, net_output, log_probs, sample=None):
lprobs = super().get_normalized_probs(net_output, log_probs, sample)
lprobs.batch_first = False
return lprobs
class W2lConvGluEncoder(FairseqEncoder):
def __init__(
self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
):
super().__init__(None)
self.input_dim = input_feat_per_channel
if in_channels != 1:
raise ValueError("only 1 input channel is currently supported")
self.conv_layers = nn.ModuleList()
self.linear_layers = nn.ModuleList()
self.dropouts = []
cur_channels = input_feat_per_channel
for out_channels, kernel_size, padding, dropout in conv_enc_config:
layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init
self.conv_layers.append(nn.utils.weight_norm(layer))
self.dropouts.append(
FairseqDropout(dropout, module_name=self.__class__.__name__)
)
if out_channels % 2 != 0:
raise ValueError("odd # of out_channels is incompatible with GLU")
cur_channels = out_channels // 2 # halved by GLU
for out_channels in [2 * cur_channels, vocab_size]:
layer = nn.Linear(cur_channels, out_channels)
layer.weight.data.mul_(math.sqrt(3))
self.linear_layers.append(nn.utils.weight_norm(layer))
cur_channels = out_channels // 2
def forward(self, src_tokens, src_lengths, **kwargs):
"""
src_tokens: padded tensor (B, T, C * feat)
src_lengths: tensor of original lengths of input utterances (B,)
"""
B, T, _ = src_tokens.size()
x = src_tokens.transpose(1, 2).contiguous() # (B, feat, T) assuming C == 1
for layer_idx in range(len(self.conv_layers)):
x = self.conv_layers[layer_idx](x)
x = F.glu(x, dim=1)
x = self.dropouts[layer_idx](x)
x = x.transpose(1, 2).contiguous() # (B, T, 908)
x = self.linear_layers[0](x)
x = F.glu(x, dim=2)
x = self.dropouts[-1](x)
x = self.linear_layers[1](x)
assert x.size(0) == B
assert x.size(1) == T
encoder_out = x.transpose(0, 1) # (T, B, vocab_size)
# need to debug this -- find a simpler/elegant way in pytorch APIs
encoder_padding_mask = (
torch.arange(T).view(1, T).expand(B, -1).to(x.device)
>= src_lengths.view(B, 1).expand(-1, T)
).t() # (B x T) -> (T x B)
return {
"encoder_out": encoder_out, # (T, B, vocab_size)
"encoder_padding_mask": encoder_padding_mask, # (T, B)
}
def reorder_encoder_out(self, encoder_out, new_order):
encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
1, new_order
)
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(1, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return (1e6, 1e6) # an arbitrary large number
@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
def w2l_conv_glu_enc(args):
args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)
args.in_channels = getattr(args, "in_channels", 1)
args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
| 6,079 | 32.96648 | 87 | py |
RegularizedBN | RegularizedBN-main/examples/speech_recognition/datasets/asr_prep_json.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
import concurrent.futures
from itertools import chain
import argparse
import os
import json
import sentencepiece as spm
import multiprocessing
from fairseq.data import Dictionary
MILLISECONDS_TO_SECONDS = 0.001
def process_sample(aud_path, label, utt_id, sp, tgt_dict):
import torchaudio
input = {}
output = {}
si, ei = torchaudio.info(aud_path)
input["length_ms"] = int(si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS)
input["path"] = aud_path
token = " ".join(sp.EncodeAsPieces(lable))
ids = tgt_dict.encode_line(token, append_eos=False)
output["text"] = lable
output["token"] = token
output["tokenid"] = ', '.join(map(str, [t.tolist() for t in ids]))
return {utt_id: {"input": input, "output": output}}
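# Example of one resulting entry (all values are made up for illustration):
#
#   {
#       "utt-0001": {
#           "input": {"length_ms": 2500, "path": "/data/utt-0001.wav"},
#           "output": {
#               "text": "hello world",
#               "token": "_he llo _world",
#               "tokenid": "42, 17, 96",
#           },
#       }
#   }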
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-dirs", nargs="+", default=['-'], required=True,
help="input directories with audio files")
parser.add_argument("--labels", required=True,
help="aggregated input labels with format <ID LABEL> per line",
type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument("--spm-model", required=True,
help="sentencepiece model to use for encoding",
type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument("--dictionary", required=True,
help="file to load fairseq dictionary from",
type=argparse.FileType('r', encoding='UTF-8'))
parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav")
parser.add_argument("--output", required=True, type=argparse.FileType('w'),
help="path to save json output")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.spm_model.name)
tgt_dict = Dictionary.load(args.dictionary)
labels = {}
for line in args.labels:
(utt_id, label) = line.split(" ", 1)
labels[utt_id] = label
if len(labels) == 0:
        raise Exception('No labels found in ', args.labels.name)
Sample = namedtuple('Sample', 'aud_path utt_id')
samples = []
for path, _, files in chain.from_iterable(os.walk(path) for path in args.audio_dirs):
for f in files:
if f.endswith(args.audio_format):
if len(os.path.splitext(f)) != 2:
raise Exception('Expect <utt_id.extension> file name. Got: ', f)
utt_id = os.path.splitext(f)[0]
if utt_id not in labels:
continue
samples.append(Sample(os.path.join(path, f), utt_id))
utts = {}
num_cpu = multiprocessing.cpu_count()
with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor:
future_to_sample = {executor.submit(process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict): s for s in samples}
for future in concurrent.futures.as_completed(future_to_sample):
try:
data = future.result()
except Exception as exc:
print('generated an exception: ', exc)
else:
utts.update(data)
json.dump({"utts": utts}, args.output, indent=4)
if __name__ == "__main__":
main()
| 3,670 | 36.845361 | 134 | py |
RegularizedBN | RegularizedBN-main/examples/speech_recognition/data/collaters.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module contains a collection of classes that implement
collate functionality for various tasks.
Collaters should know what data to expect for each sample
and should pack / collate them into batches
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from fairseq.data import data_utils as fairseq_data_utils
class Seq2SeqCollater(object):
"""
Implements collate function mainly for seq2seq tasks
This expects each sample to contain feature (src_tokens) and
targets.
This collator is also used for aligned training task.
"""
def __init__(
self,
feature_index=0,
label_index=1,
pad_index=1,
eos_index=2,
move_eos_to_beginning=True,
):
self.feature_index = feature_index
self.label_index = label_index
self.pad_index = pad_index
self.eos_index = eos_index
self.move_eos_to_beginning = move_eos_to_beginning
def _collate_frames(self, frames):
"""Convert a list of 2d frames into a padded 3d tensor
Args:
frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
len_max = max(frame.size(0) for frame in frames)
f_dim = frames[0].size(1)
res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0)
for i, v in enumerate(frames):
res[i, : v.size(0)] = v
return res
def collate(self, samples):
"""
utility function to collate samples into batch for speech recognition.
"""
if len(samples) == 0:
return {}
# parse samples into torch tensors
parsed_samples = []
for s in samples:
# skip invalid samples
if s["data"][self.feature_index] is None:
continue
source = s["data"][self.feature_index]
if isinstance(source, (np.ndarray, np.generic)):
source = torch.from_numpy(source)
target = s["data"][self.label_index]
if isinstance(target, (np.ndarray, np.generic)):
target = torch.from_numpy(target).long()
elif isinstance(target, list):
target = torch.LongTensor(target)
parsed_sample = {"id": s["id"], "source": source, "target": target}
parsed_samples.append(parsed_sample)
samples = parsed_samples
id = torch.LongTensor([s["id"] for s in samples])
frames = self._collate_frames([s["source"] for s in samples])
# sort samples by descending number of frames
frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples])
frames_lengths, sort_order = frames_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
frames = frames.index_select(0, sort_order)
target = None
target_lengths = None
prev_output_tokens = None
if samples[0].get("target", None) is not None:
ntokens = sum(len(s["target"]) for s in samples)
target = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, sort_order)
target_lengths = torch.LongTensor(
[s["target"].size(0) for s in samples]
).index_select(0, sort_order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=self.move_eos_to_beginning,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {"src_tokens": frames, "src_lengths": frames_lengths},
"target": target,
"target_lengths": target_lengths,
"nsentences": len(samples),
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
| 4,812 | 35.462121 | 84 | py |
RegularizedBN | RegularizedBN-main/examples/speech_recognition/data/data_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def calc_mean_invstddev(feature):
if len(feature.size()) != 2:
raise ValueError("We expect the input feature to be 2-D tensor")
mean = feature.mean(0)
var = feature.var(0)
# avoid division by ~zero
eps = 1e-8
if (var < eps).any():
return mean, 1.0 / (torch.sqrt(var) + eps)
return mean, 1.0 / torch.sqrt(var)
def apply_mv_norm(features):
# If there is less than 2 spectrograms, the variance cannot be computed (is NaN)
# and normalization is not possible, so return the item as it is
if features.size(0) < 2:
return features
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res
def lengths_to_encoder_padding_mask(lengths, batch_first=False):
"""
convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor
Args:
lengths: a (B, )-shaped tensor
Return:
max_length: maximum length of B sequences
encoder_padding_mask: a (max_length, B) binary mask, where
[t, b] = 0 for t < lengths[b] and 1 otherwise
TODO:
kernelize this function if benchmarking shows this function is slow
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
    encoder_padding_mask = (
        torch.arange(max_lengths)  # a (T, ) tensor with [0, ..., T-1]
        .to(lengths.device)  # move to the right device
        .view(1, max_lengths)  # reshape to (1, T)-shaped tensor
        .expand(bsz, -1)  # expand to (B, T)-shaped tensor
    ) >= lengths.view(bsz, 1).expand(-1, max_lengths)
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
def encoder_padding_mask_to_lengths(
encoder_padding_mask, max_lengths, batch_size, device
):
"""
convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor
Conventionally, encoder output contains a encoder_padding_mask, which is
a 2-D mask in a shape (T, B), whose (t, b) element indicate whether
encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we
need to convert this mask tensor to a 1-D tensor in shape (B, ), where
[b] denotes the valid length of b-th sequence
Args:
encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None,
indicating all are valid
Return:
seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the
number of valid elements of b-th sequence
max_lengths: maximum length of all sequence, if encoder_padding_mask is
not None, max_lengths must equal to encoder_padding_mask.size(0)
batch_size: batch size; if encoder_padding_mask is
not None, max_lengths must equal to encoder_padding_mask.size(1)
device: which device to put the result on
"""
if encoder_padding_mask is None:
return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device)
assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match"
assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match"
return max_lengths - torch.sum(encoder_padding_mask, dim=0)
| 3,429 | 32.960396 | 84 | py |