code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def english_cleaners(text):
'''Pipeline for English text, including abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_abbreviations(text)
phonemes = phonemize(text, language='en-us', backend='espeak', strip=True)
phonemes = collapse_whitespace(phonemes)
return phonemes | Pipeline for English text, including abbreviation expansion. | english_cleaners | python | jaywalnut310/vits | text/cleaners.py | https://github.com/jaywalnut310/vits/blob/master/text/cleaners.py | MIT |
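Both cleaner pipelines above lean on a few small helpers (convert_to_ascii, lowercase, expand_abbreviations, collapse_whitespace) before handing the text to phonemizer's phonemize for the G2P step. A minimal sketch of those text helpers, assuming the unidecode package is available; the abbreviation table here is illustrative, not the repo's full list:

```python
import re
from unidecode import unidecode  # ASCII transliteration

_whitespace_re = re.compile(r'\s+')
# Illustrative subset of (abbreviation, expansion) pairs.
_abbreviations = [(re.compile(r'\b%s\.' % abbr, re.IGNORECASE), expansion)
                  for abbr, expansion in [('mrs', 'misess'), ('mr', 'mister'), ('dr', 'doctor')]]

def convert_to_ascii(text):
    return unidecode(text)

def lowercase(text):
    return text.lower()

def expand_abbreviations(text):
    for pattern, replacement in _abbreviations:
        text = pattern.sub(replacement, text)
    return text

def collapse_whitespace(text):
    return re.sub(_whitespace_re, ' ', text)
```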
def english_cleaners2(text):
'''Pipeline for English text, including abbreviation expansion, punctuation preservation and stress marks.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_abbreviations(text)
phonemes = phonemize(text, language='en-us', backend='espeak', strip=True, preserve_punctuation=True, with_stress=True)
phonemes = collapse_whitespace(phonemes)
return phonemes | Pipeline for English text, including abbreviation expansion, punctuation preservation and stress marks. | english_cleaners2 | python | jaywalnut310/vits | text/cleaners.py | https://github.com/jaywalnut310/vits/blob/master/text/cleaners.py | MIT |
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
clean_text = _clean_text(text, cleaner_names)
for symbol in clean_text:
symbol_id = _symbol_to_id[symbol]
sequence += [symbol_id]
return sequence | Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text | text_to_sequence | python | jaywalnut310/vits | text/__init__.py | https://github.com/jaywalnut310/vits/blob/master/text/__init__.py | MIT |
def cleaned_text_to_sequence(cleaned_text):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
Args:
text: string to convert to a sequence
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = [_symbol_to_id[symbol] for symbol in cleaned_text]
return sequence | Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
Args:
text: string to convert to a sequence
Returns:
List of integers corresponding to the symbols in the text | cleaned_text_to_sequence | python | jaywalnut310/vits | text/__init__.py | https://github.com/jaywalnut310/vits/blob/master/text/__init__.py | MIT |
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
s = _id_to_symbol[symbol_id]
result += s
return result | Converts a sequence of IDs back to a string | sequence_to_text | python | jaywalnut310/vits | text/__init__.py | https://github.com/jaywalnut310/vits/blob/master/text/__init__.py | MIT |
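The three functions above all rely on the module-level _symbol_to_id / _id_to_symbol tables built from the symbol inventory in text/symbols.py. A toy sketch with a placeholder symbol set, just to show the round trip:

```python
# Placeholder symbol set; the real one comes from text/symbols.py.
symbols = ['_', ' ', '!', ',', '.', '?'] + list('abcdefghijklmnopqrstuvwxyz')

_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}

# Encode then decode: every symbol maps to an integer ID and back.
ids = [_symbol_to_id[s] for s in 'hello world']
assert ''.join(_id_to_symbol[i] for i in ids) == 'hello world'
```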
def __init__(self, features, activation, bn):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.bn = bn
self.groups=1
self.conv1 = nn.Conv2d(
features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
)
self.conv2 = nn.Conv2d(
features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
)
if self.bn==True:
self.bn1 = nn.BatchNorm2d(features)
self.bn2 = nn.BatchNorm2d(features)
self.activation = activation
self.skip_add = nn.quantized.FloatFunctional() | Init.
Args:
features (int): number of features | __init__ | python | LiheYoung/Depth-Anything | depth_anything/blocks.py | https://github.com/LiheYoung/Depth-Anything/blob/master/depth_anything/blocks.py | Apache-2.0 |
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
"""
out = self.activation(x)
out = self.conv1(out)
if self.bn==True:
out = self.bn1(out)
out = self.activation(out)
out = self.conv2(out)
if self.bn==True:
out = self.bn2(out)
if self.groups > 1:
out = self.conv_merge(out)
return self.skip_add.add(out, x) | Forward pass.
Args:
x (tensor): input
Returns:
tensor: output | forward | python | LiheYoung/Depth-Anything | depth_anything/blocks.py | https://github.com/LiheYoung/Depth-Anything/blob/master/depth_anything/blocks.py | Apache-2.0 |
def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
"""Init.
Args:
features (int): number of features
"""
super(FeatureFusionBlock, self).__init__()
self.deconv = deconv
self.align_corners = align_corners
self.groups=1
self.expand = expand
out_features = features
if self.expand==True:
out_features = features//2
self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
self.skip_add = nn.quantized.FloatFunctional()
self.size=size | Init.
Args:
features (int): number of features | __init__ | python | LiheYoung/Depth-Anything | depth_anything/blocks.py | https://github.com/LiheYoung/Depth-Anything/blob/master/depth_anything/blocks.py | Apache-2.0 |
def forward(self, *xs, size=None):
"""Forward pass.
Returns:
tensor: output
"""
output = xs[0]
if len(xs) == 2:
res = self.resConfUnit1(xs[1])
output = self.skip_add.add(output, res)
output = self.resConfUnit2(output)
if (size is None) and (self.size is None):
modifier = {"scale_factor": 2}
elif size is None:
modifier = {"size": self.size}
else:
modifier = {"size": size}
output = nn.functional.interpolate(
output, **modifier, mode="bilinear", align_corners=self.align_corners
)
output = self.out_conv(output)
return output | Forward pass.
Returns:
tensor: output | forward | python | LiheYoung/Depth-Anything | depth_anything/blocks.py | https://github.com/LiheYoung/Depth-Anything/blob/master/depth_anything/blocks.py | Apache-2.0 |
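A quick shape check for the fusion block, as a sketch that assumes FeatureFusionBlock and ResidualConvUnit are importable as defined above:

```python
import torch
import torch.nn as nn

# 64-channel fusion block without batch norm, matching the constructor above.
block = FeatureFusionBlock(features=64, activation=nn.ReLU(False), bn=False)

deep = torch.randn(1, 64, 32, 32)  # feature map from the deeper decoder stage
skip = torch.randn(1, 64, 32, 32)  # lateral (skip) feature map at the same resolution

out = block(deep, skip)            # refine the skip, add, refine again, 2x upsample, 1x1 conv
print(out.shape)                   # torch.Size([1, 64, 64, 64])
```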
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
"""Rezise the sample to ensure the given size. Keeps aspect ratio.
Args:
sample (dict): sample
size (tuple): image size
Returns:
tuple: new size
"""
shape = list(sample["disparity"].shape)
if shape[0] >= size[0] and shape[1] >= size[1]:
return sample
scale = [0, 0]
scale[0] = size[0] / shape[0]
scale[1] = size[1] / shape[1]
scale = max(scale)
shape[0] = math.ceil(scale * shape[0])
shape[1] = math.ceil(scale * shape[1])
# resize
sample["image"] = cv2.resize(
sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
)
sample["disparity"] = cv2.resize(
sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
)
sample["mask"] = cv2.resize(
sample["mask"].astype(np.float32),
tuple(shape[::-1]),
interpolation=cv2.INTER_NEAREST,
)
sample["mask"] = sample["mask"].astype(bool)
return tuple(shape) | Resize the sample to ensure the given size. Keeps aspect ratio.
Args:
sample (dict): sample
size (tuple): image size
Returns:
tuple: new size | apply_min_size | python | LiheYoung/Depth-Anything | depth_anything/util/transform.py | https://github.com/LiheYoung/Depth-Anything/blob/master/depth_anything/util/transform.py | Apache-2.0 |
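Worked example of the scale computation in apply_min_size: both sides must reach the target, so the larger of the two per-axis scales wins and the aspect ratio is preserved.

```python
import math

shape, size = [200, 300], (384, 384)                   # (H, W) of the sample vs. required minimum
scale = max(size[0] / shape[0], size[1] / shape[1])    # max(1.92, 1.28) = 1.92
new_shape = (math.ceil(scale * shape[0]), math.ceil(scale * shape[1]))
print(new_shape)                                       # (384, 576)
```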
def __init__(
self,
width,
height,
resize_target=True,
keep_aspect_ratio=False,
ensure_multiple_of=1,
resize_method="lower_bound",
image_interpolation_method=cv2.INTER_AREA,
):
"""Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height is constrained to be multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound".
"""
self.__width = width
self.__height = height
self.__resize_target = resize_target
self.__keep_aspect_ratio = keep_aspect_ratio
self.__multiple_of = ensure_multiple_of
self.__resize_method = resize_method
self.__image_interpolation_method = image_interpolation_method | Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height is constrained to be multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound". | __init__ | python | LiheYoung/Depth-Anything | depth_anything/util/transform.py | https://github.com/LiheYoung/Depth-Anything/blob/master/depth_anything/util/transform.py | Apache-2.0 |
def fix_random_seed(seed: int):
"""
Fix random seed for reproducibility
Args:
seed (int): random seed
"""
import random
import numpy
import torch
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False | Fix random seed for reproducibility
Args:
seed (int): random seed | fix_random_seed | python | LiheYoung/Depth-Anything | metric_depth/train_mix.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/train_mix.py | Apache-2.0 |
def infer(model, images, **kwargs):
"""Inference with flip augmentation"""
# images.shape = N, C, H, W
def get_depth_from_prediction(pred):
if isinstance(pred, torch.Tensor):
pred = pred # pass
elif isinstance(pred, (list, tuple)):
pred = pred[-1]
elif isinstance(pred, dict):
pred = pred['metric_depth'] if 'metric_depth' in pred else pred['out']
else:
raise NotImplementedError(f"Unknown output type {type(pred)}")
return pred
pred1 = model(images, **kwargs)
pred1 = get_depth_from_prediction(pred1)
pred2 = model(torch.flip(images, [3]), **kwargs)
pred2 = get_depth_from_prediction(pred2)
pred2 = torch.flip(pred2, [3])
mean_pred = 0.5 * (pred1 + pred2)
return mean_pred | Inference with flip augmentation | infer | python | LiheYoung/Depth-Anything | metric_depth/evaluate.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/evaluate.py | Apache-2.0 |
def build_model(config) -> DepthModel:
"""Builds a model from a config. The model is specified by the model name and version in the config. The model is then constructed using the build_from_config function of the model interface.
This function should be used to construct models for training and evaluation.
Args:
config (dict): Config dict. Config is constructed in utils/config.py. Each model has its own config file(s) saved in its root model folder.
Returns:
torch.nn.Module: Model corresponding to name and version as specified in config
"""
module_name = f"zoedepth.models.{config.model}"
try:
module = import_module(module_name)
except ModuleNotFoundError as e:
# print the original error message
print(e)
raise ValueError(
f"Model {config.model} not found. Refer above error for details.") from e
try:
get_version = getattr(module, "get_version")
except AttributeError as e:
raise ValueError(
f"Model {config.model} has no get_version function.") from e
return get_version(config.version_name).build_from_config(config) | Builds a model from a config. The model is specified by the model name and version in the config. The model is then constructed using the build_from_config function of the model interface.
This function should be used to construct models for training and evaluation.
Args:
config (dict): Config dict. Config is constructed in utils/config.py. Each model has its own config file(s) saved in its root model folder.
Returns:
torch.nn.Module: Model corresponding to name and version as specified in config | build_model | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/builder.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/builder.py | Apache-2.0 |
def _infer(self, x: torch.Tensor):
"""
Inference interface for the model
Args:
x (torch.Tensor): input tensor of shape (b, c, h, w)
Returns:
torch.Tensor: output tensor of shape (b, 1, h, w)
"""
return self(x)['metric_depth'] | Inference interface for the model
Args:
x (torch.Tensor): input tensor of shape (b, c, h, w)
Returns:
torch.Tensor: output tensor of shape (b, 1, h, w) | _infer | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/depth_model.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/depth_model.py | Apache-2.0 |
def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode="reflect", **kwargs) -> torch.Tensor:
"""
Inference interface for the model with padding augmentation
Padding augmentation fixes the boundary artifacts in the output depth map.
Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image.
This augmentation pads the input image and crops the prediction back to the original size / view.
Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.
Args:
x (torch.Tensor): input tensor of shape (b, c, h, w)
pad_input (bool, optional): whether to pad the input or not. Defaults to True.
fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.
fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.
upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.
padding_mode (str, optional): padding mode. Defaults to "reflect".
Returns:
torch.Tensor: output tensor of shape (b, 1, h, w)
"""
# assert x is nchw and c = 3
assert x.dim() == 4, "x must be 4 dimensional, got {}".format(x.dim())
assert x.shape[1] == 3, "x must have 3 channels, got {}".format(x.shape[1])
if pad_input:
assert fh > 0 or fw > 0, "atlease one of fh and fw must be greater than 0"
pad_h = int(np.sqrt(x.shape[2]/2) * fh)
pad_w = int(np.sqrt(x.shape[3]/2) * fw)
padding = [pad_w, pad_w]
if pad_h > 0:
padding += [pad_h, pad_h]
x = F.pad(x, padding, mode=padding_mode, **kwargs)
out = self._infer(x)
if out.shape[-2:] != x.shape[-2:]:
out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)
if pad_input:
# crop to the original size, handling the case where pad_h and pad_w is 0
if pad_h > 0:
out = out[:, :, pad_h:-pad_h,:]
if pad_w > 0:
out = out[:, :, :, pad_w:-pad_w]
return out | Inference interface for the model with padding augmentation
Padding augmentation fixes the boundary artifacts in the output depth map.
Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image.
This augmentation pads the input image and crops the prediction back to the original size / view.
Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.
Args:
x (torch.Tensor): input tensor of shape (b, c, h, w)
pad_input (bool, optional): whether to pad the input or not. Defaults to True.
fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.
fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.
upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.
padding_mode (str, optional): padding mode. Defaults to "reflect".
Returns:
torch.Tensor: output tensor of shape (b, 1, h, w) | _infer_with_pad_aug | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/depth_model.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/depth_model.py | Apache-2.0 |
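Worked example of the padding sizes this augmentation produces for a 480x640 input with the default factors fh = fw = 3:

```python
import numpy as np

h, w, fh, fw = 480, 640, 3, 3
pad_h = int(np.sqrt(h / 2) * fh)   # int(sqrt(240) * 3) = 46
pad_w = int(np.sqrt(w / 2) * fw)   # int(sqrt(320) * 3) = 53
print(pad_h, pad_w)                # 46 53 -> padded input is 572 x 746 before cropping back
```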
def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor:
"""
Inference interface for the model with horizontal flip augmentation
Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.
Args:
x (torch.Tensor): input tensor of shape (b, c, h, w)
pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
Returns:
torch.Tensor: output tensor of shape (b, 1, h, w)
"""
# infer with horizontal flip and average
out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs)
out = (out + torch.flip(out_flip, dims=[3])) / 2
return out | Inference interface for the model with horizontal flip augmentation
Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.
Args:
x (torch.Tensor): input tensor of shape (b, c, h, w)
pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
Returns:
torch.Tensor: output tensor of shape (b, 1, h, w) | infer_with_flip_aug | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/depth_model.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/depth_model.py | Apache-2.0 |
def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor:
"""
Inference interface for the model
Args:
x (torch.Tensor): input tensor of shape (b, c, h, w)
pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
Returns:
torch.Tensor: output tensor of shape (b, 1, h, w)
"""
if with_flip_aug:
return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs)
else:
return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs) | Inference interface for the model
Args:
x (torch.Tensor): input tensor of shape (b, c, h, w)
pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
Returns:
torch.Tensor: output tensor of shape (b, 1, h, w) | infer | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/depth_model.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/depth_model.py | Apache-2.0 |
def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str="numpy", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]:
"""
Inference interface for the model for PIL image
Args:
pil_img (PIL.Image.Image): input PIL image
pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to "numpy".
"""
x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device)
out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs)
if output_type == "numpy":
return out_tensor.squeeze().cpu().numpy()
elif output_type == "pil":
# uint16 is required for depth pil image
out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16)
return Image.fromarray(out_16bit_numpy)
elif output_type == "tensor":
return out_tensor.squeeze().cpu()
else:
raise ValueError(f"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'") | Inference interface for the model for PIL image
Args:
pil_img (PIL.Image.Image): input PIL image
pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to "numpy". | infer_pil | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/depth_model.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/depth_model.py | Apache-2.0 |
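Typical call pattern for the PIL interface (a sketch; `model` is assumed to be an already-built DepthModel on the right device, and the image path is hypothetical):

```python
from PIL import Image

img = Image.open("example.jpg").convert("RGB")        # hypothetical input image
depth_np = model.infer_pil(img)                       # (H, W) float numpy array
depth_pil = model.infer_pil(img, output_type="pil")   # 16-bit PIL image, values are depth * 256
depth_t = model.infer_pil(img, output_type="tensor")  # torch tensor on CPU
```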
def load_state_dict(model, state_dict):
"""Load state_dict into model, handling DataParallel and DistributedDataParallel. Also checks for "model" key in state_dict.
DataParallel prefixes state_dict keys with 'module.' when saving.
If the model is not a DataParallel model but the state_dict is, then prefixes are removed.
If the model is a DataParallel model but the state_dict is not, then prefixes are added.
"""
state_dict = state_dict.get('model', state_dict)
# if model is a DataParallel model, then state_dict keys are prefixed with 'module.'
do_prefix = isinstance(
model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel))
state = {}
for k, v in state_dict.items():
if k.startswith('module.') and not do_prefix:
k = k[7:]
if not k.startswith('module.') and do_prefix:
k = 'module.' + k
state[k] = v
model.load_state_dict(state)
print("Loaded successfully")
return model | Load state_dict into model, handling DataParallel and DistributedDataParallel. Also checks for "model" key in state_dict.
DataParallel prefixes state_dict keys with 'module.' when saving.
If the model is not a DataParallel model but the state_dict is, then prefixes are removed.
If the model is a DataParallel model but the state_dict is not, then prefixes are added. | load_state_dict | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/model_io.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/model_io.py | Apache-2.0 |
def load_state_from_resource(model, resource: str):
"""Loads weights to the model from a given resource. A resource can be of following types:
1. URL. Prefixed with "url::"
e.g. url::http(s)://url.resource.com/ckpt.pt
2. Local path. Prefixed with "local::"
e.g. local::/path/to/ckpt.pt
Args:
model (torch.nn.Module): Model
resource (str): resource string
Returns:
torch.nn.Module: Model with loaded weights
"""
print(f"Using pretrained resource {resource}")
if resource.startswith('url::'):
url = resource.split('url::')[1]
return load_state_dict_from_url(model, url, progress=True)
elif resource.startswith('local::'):
path = resource.split('local::')[1]
return load_wts(model, path)
else:
raise ValueError("Invalid resource type, only url:: and local:: are supported") | Loads weights to the model from a given resource. A resource can be of following types:
1. URL. Prefixed with "url::"
e.g. url::http(s)://url.resource.com/ckpt.pt
2. Local path. Prefixed with "local::"
e.g. local::/path/to/ckpt.pt
Args:
model (torch.nn.Module): Model
resource (str): resource string
Returns:
torch.nn.Module: Model with loaded weights | load_state_from_resource | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/model_io.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/model_io.py | Apache-2.0 |
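The two accepted resource prefixes, shown on hypothetical paths (a sketch; `model` is an already-constructed module):

```python
# Download a checkpoint over HTTP(S) and load it.
model = load_state_from_resource(model, "url::https://example.com/checkpoints/ckpt.pt")

# Load a checkpoint from the local filesystem.
model = load_state_from_resource(model, "local::./checkpoints/ckpt.pt")
```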
def __init__(self, core, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10,
n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True,
midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs):
"""ZoeDepth model. This is the version of ZoeDepth that has a single metric head
Args:
core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
n_bins (int, optional): Number of bin centers. Defaults to 64.
bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers.
For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus".
bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3.
max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10.
n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'.
min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True.
midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10.
encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10.
pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10.
"""
super().__init__()
self.core = core
self.max_depth = max_depth
self.min_depth = min_depth
self.min_temp = min_temp
self.bin_centers_type = bin_centers_type
self.midas_lr_factor = midas_lr_factor
self.encoder_lr_factor = encoder_lr_factor
self.pos_enc_lr_factor = pos_enc_lr_factor
self.train_midas = train_midas
self.inverse_midas = inverse_midas
if self.encoder_lr_factor <= 0:
self.core.freeze_encoder(
freeze_rel_pos=self.pos_enc_lr_factor <= 0)
N_MIDAS_OUT = 32
btlnck_features = self.core.output_channels[0]
num_out_features = self.core.output_channels[1:]
# print('core output channels:', self.core.output_channels)
self.conv2 = nn.Conv2d(btlnck_features, btlnck_features,
kernel_size=1, stride=1, padding=0) # btlnck conv
if bin_centers_type == "normed":
SeedBinRegressorLayer = SeedBinRegressor
Attractor = AttractorLayer
elif bin_centers_type == "softplus":
SeedBinRegressorLayer = SeedBinRegressorUnnormed
Attractor = AttractorLayerUnnormed
elif bin_centers_type == "hybrid1":
SeedBinRegressorLayer = SeedBinRegressor
Attractor = AttractorLayerUnnormed
elif bin_centers_type == "hybrid2":
SeedBinRegressorLayer = SeedBinRegressorUnnormed
Attractor = AttractorLayer
else:
raise ValueError(
"bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'")
self.seed_bin_regressor = SeedBinRegressorLayer(
btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth)
self.seed_projector = Projector(btlnck_features, bin_embedding_dim)
self.projectors = nn.ModuleList([
Projector(num_out, bin_embedding_dim)
for num_out in num_out_features
])
self.attractors = nn.ModuleList([
Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth,
alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type)
for i in range(len(num_out_features))
])
last_in = N_MIDAS_OUT + 1 # +1 for relative depth
# use log binomial instead of softmax
self.conditional_log_binomial = ConditionalLogBinomial(
last_in, bin_embedding_dim, n_classes=n_bins, min_temp=min_temp, max_temp=max_temp) | ZoeDepth model. This is the version of ZoeDepth that has a single metric head
Args:
core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
n_bins (int, optional): Number of bin centers. Defaults to 64.
bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers.
For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus".
bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3.
max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10.
n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'.
min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True.
midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10.
encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10.
pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/zoedepth/zoedepth_v1.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/zoedepth/zoedepth_v1.py | Apache-2.0 |
def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs):
"""
Args:
x (torch.Tensor): Input image tensor of shape (B, C, H, W)
return_final_centers (bool, optional): Whether to return the final bin centers. Defaults to False.
denorm (bool, optional): Whether to denormalize the input image. This reverses ImageNet normalization as midas normalization is different. Defaults to False.
return_probs (bool, optional): Whether to return the output probability distribution. Defaults to False.
Returns:
dict: Dictionary containing the following keys:
- rel_depth (torch.Tensor): Relative depth map of shape (B, H, W)
- metric_depth (torch.Tensor): Metric depth map of shape (B, 1, H, W)
- bin_centers (torch.Tensor): Bin centers of shape (B, n_bins). Present only if return_final_centers is True
- probs (torch.Tensor): Output probability distribution of shape (B, n_bins, H, W). Present only if return_probs is True
"""
# print('input shape', x.shape)
b, c, h, w = x.shape
# print("input shape:", x.shape)
self.orig_input_width = w
self.orig_input_height = h
rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True)
# print("output shapes", rel_depth.shape, out.shape)
# print('rel_depth shape:', rel_depth.shape)
# print('out type:', type(out))
# for k in range(len(out)):
# print(k, out[k].shape)
outconv_activation = out[0]
btlnck = out[1]
x_blocks = out[2:]
x_d0 = self.conv2(btlnck)
x = x_d0
_, seed_b_centers = self.seed_bin_regressor(x)
if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2':
b_prev = (seed_b_centers - self.min_depth) / \
(self.max_depth - self.min_depth)
else:
b_prev = seed_b_centers
prev_b_embedding = self.seed_projector(x)
# unroll this loop for better performance
for projector, attractor, x in zip(self.projectors, self.attractors, x_blocks):
b_embedding = projector(x)
b, b_centers = attractor(
b_embedding, b_prev, prev_b_embedding, interpolate=True)
b_prev = b.clone()
prev_b_embedding = b_embedding.clone()
last = outconv_activation
if self.inverse_midas:
# invert depth followed by normalization
rel_depth = 1.0 / (rel_depth + 1e-6)
rel_depth = (rel_depth - rel_depth.min()) / \
(rel_depth.max() - rel_depth.min())
# concat rel depth with last. First interpolate rel depth to last size
rel_cond = rel_depth.unsqueeze(1)
rel_cond = nn.functional.interpolate(
rel_cond, size=last.shape[2:], mode='bilinear', align_corners=True)
last = torch.cat([last, rel_cond], dim=1)
b_embedding = nn.functional.interpolate(
b_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
x = self.conditional_log_binomial(last, b_embedding)
# Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor
# print(x.shape, b_centers.shape)
b_centers = nn.functional.interpolate(
b_centers, x.shape[-2:], mode='bilinear', align_corners=True)
out = torch.sum(x * b_centers, dim=1, keepdim=True)
# Structure output dict
output = dict(metric_depth=out)
if return_final_centers or return_probs:
output['bin_centers'] = b_centers
if return_probs:
output['probs'] = x
return output | Args:
x (torch.Tensor): Input image tensor of shape (B, C, H, W)
return_final_centers (bool, optional): Whether to return the final bin centers. Defaults to False.
denorm (bool, optional): Whether to denormalize the input image. This reverses ImageNet normalization as midas normalization is different. Defaults to False.
return_probs (bool, optional): Whether to return the output probability distribution. Defaults to False.
Returns:
dict: Dictionary containing the following keys:
- rel_depth (torch.Tensor): Relative depth map of shape (B, H, W)
- metric_depth (torch.Tensor): Metric depth map of shape (B, 1, H, W)
- bin_centers (torch.Tensor): Bin centers of shape (B, n_bins). Present only if return_final_centers is True
- probs (torch.Tensor): Output probability distribution of shape (B, n_bins, H, W). Present only if return_probs is True | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/zoedepth/zoedepth_v1.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/zoedepth/zoedepth_v1.py | Apache-2.0 |
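The final readout (`out = torch.sum(x * b_centers, dim=1, keepdim=True)`) is a soft readout over bins: each pixel's depth is the probability-weighted average of its bin centers. A one-pixel sketch:

```python
import torch

probs = torch.tensor([0.1, 0.6, 0.3])     # per-bin probabilities from the log-binomial head
centers = torch.tensor([1.0, 2.0, 4.0])   # metric bin centers for that pixel
depth = (probs * centers).sum()           # 0.1*1 + 0.6*2 + 0.3*4 = 2.5
print(depth)                              # tensor(2.5000)
```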
def get_lr_params(self, lr):
"""
Learning rate configuration for different layers of the model
Args:
lr (float) : Base learning rate
Returns:
list : list of parameters to optimize and their learning rates, in the format required by torch optimizers.
"""
param_conf = []
if self.train_midas:
if self.encoder_lr_factor > 0:
param_conf.append({'params': self.core.get_enc_params_except_rel_pos(
), 'lr': lr / self.encoder_lr_factor})
if self.pos_enc_lr_factor > 0:
param_conf.append(
{'params': self.core.get_rel_pos_params(), 'lr': lr / self.pos_enc_lr_factor})
# midas_params = self.core.core.scratch.parameters()
midas_params = self.core.core.depth_head.parameters()
midas_lr_factor = self.midas_lr_factor
param_conf.append(
{'params': midas_params, 'lr': lr / midas_lr_factor})
remaining_modules = []
for name, child in self.named_children():
if name != 'core':
remaining_modules.append(child)
remaining_params = itertools.chain(
*[child.parameters() for child in remaining_modules])
param_conf.append({'params': remaining_params, 'lr': lr})
return param_conf | Learning rate configuration for different layers of the model
Args:
lr (float) : Base learning rate
Returns:
list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. | get_lr_params | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/zoedepth/zoedepth_v1.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/zoedepth/zoedepth_v1.py | Apache-2.0 |
def __init__(self, in_channels, patch_size=10, embedding_dim=128, num_heads=4, use_class_token=False):
"""ViT-like transformer block
Args:
in_channels (int): Input channels
patch_size (int, optional): patch size. Defaults to 10.
embedding_dim (int, optional): Embedding dimension in transformer model. Defaults to 128.
num_heads (int, optional): number of attention heads. Defaults to 4.
use_class_token (bool, optional): Whether to use extra token at the start for global accumulation (called as "class token"). Defaults to False.
"""
super(PatchTransformerEncoder, self).__init__()
self.use_class_token = use_class_token
encoder_layers = nn.TransformerEncoderLayer(
embedding_dim, num_heads, dim_feedforward=1024)
self.transformer_encoder = nn.TransformerEncoder(
encoder_layers, num_layers=4) # takes shape S,N,E
self.embedding_convPxP = nn.Conv2d(in_channels, embedding_dim,
kernel_size=patch_size, stride=patch_size, padding=0) | ViT-like transformer block
Args:
in_channels (int): Input channels
patch_size (int, optional): patch size. Defaults to 10.
embedding_dim (int, optional): Embedding dimension in transformer model. Defaults to 128.
num_heads (int, optional): number of attention heads. Defaults to 4.
use_class_token (bool, optional): Whether to use extra token at the start for global accumulation (called as "class token"). Defaults to False. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/patch_transformer.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/patch_transformer.py | Apache-2.0 |
def positional_encoding_1d(self, sequence_length, batch_size, embedding_dim, device='cpu'):
"""Generate positional encodings
Args:
sequence_length (int): Sequence length
embedding_dim (int): Embedding dimension
Returns:
torch.Tensor SBE: Positional encodings
"""
position = torch.arange(
0, sequence_length, dtype=torch.float32, device=device).unsqueeze(1)
index = torch.arange(
0, embedding_dim, 2, dtype=torch.float32, device=device).unsqueeze(0)
div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim))
pos_encoding = position * div_term
pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1)
pos_encoding = pos_encoding.unsqueeze(1).repeat(1, batch_size, 1)
return pos_encoding | Generate positional encodings
Args:
sequence_length (int): Sequence length
embedding_dim (int): Embedding dimension
Returns:
torch.Tensor SBE: Positional encodings | positional_encoding_1d | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/patch_transformer.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/patch_transformer.py | Apache-2.0 |
def forward(self, x):
"""Forward pass
Args:
x (torch.Tensor - NCHW): Input feature tensor
Returns:
torch.Tensor - SNE: Transformer output embeddings. S - sequence length (=HW/patch_size^2), N - batch size, E - embedding dim
"""
embeddings = self.embedding_convPxP(x).flatten(
2) # .shape = n,c,s = n, embedding_dim, s
if self.use_class_token:
# extra special token at start ?
embeddings = nn.functional.pad(embeddings, (1, 0))
# change to S,N,E format required by transformer
embeddings = embeddings.permute(2, 0, 1)
S, N, E = embeddings.shape
embeddings = embeddings + self.positional_encoding_1d(S, N, E, device=embeddings.device)
x = self.transformer_encoder(embeddings) # .shape = S, N, E
return x | Forward pass
Args:
x (torch.Tensor - NCHW): Input feature tensor
Returns:
torch.Tensor - SNE: Transformer output embeddings. S - sequence length (=HW/patch_size^2), N - batch size, E - embedding dim | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/patch_transformer.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/patch_transformer.py | Apache-2.0 |
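Shape check for the patch transformer (a sketch; assumes PatchTransformerEncoder is importable as defined above):

```python
import torch

enc = PatchTransformerEncoder(in_channels=128, patch_size=10, embedding_dim=128, num_heads=4)
x = torch.randn(2, 128, 40, 30)   # N, C, H, W
out = enc(x)
print(out.shape)                  # torch.Size([12, 2, 128]) -> S = (40//10)*(30//10) patches, N, E
```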
def log_binom(n, k, eps=1e-7):
""" log(nCk) using stirling approximation """
n = n + eps
k = k + eps
return n * torch.log(n) - k * torch.log(k) - (n-k) * torch.log(n-k+eps) | log(nCk) using Stirling's approximation | log_binom | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/dist_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/dist_layers.py | Apache-2.0 |
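Sanity check of the Stirling-style expression against the exact log binomial computed with lgamma (a standalone sketch, not part of the repo):

```python
import math
import torch

def log_binom(n, k, eps=1e-7):
    n = n + eps
    k = k + eps
    return n * torch.log(n) - k * torch.log(k) - (n - k) * torch.log(n - k + eps)

n, k = torch.tensor(255.0), torch.tensor(100.0)
exact = math.lgamma(256) - math.lgamma(101) - math.lgamma(156)   # log C(255, 100)
print(float(log_binom(n, k)), exact)  # ~170.8 vs ~167.8: agrees up to the dropped 0.5*log(2*pi*k*(n-k)/n) term
```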
def __init__(self, n_classes=256, act=torch.softmax):
"""Compute log binomial distribution for n_classes
Args:
n_classes (int, optional): number of output classes. Defaults to 256.
"""
super().__init__()
self.K = n_classes
self.act = act
self.register_buffer('k_idx', torch.arange(
0, n_classes).view(1, -1, 1, 1))
self.register_buffer('K_minus_1', torch.Tensor(
[self.K-1]).view(1, -1, 1, 1)) | Compute log binomial distribution for n_classes
Args:
n_classes (int, optional): number of output classes. Defaults to 256. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/dist_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/dist_layers.py | Apache-2.0 |
def forward(self, x, t=1., eps=1e-4):
"""Compute log binomial distribution for x
Args:
x (torch.Tensor - NCHW): probabilities
t (float, torch.Tensor - NCHW, optional): Temperature of distribution. Defaults to 1..
eps (float, optional): Small number for numerical stability. Defaults to 1e-4.
Returns:
torch.Tensor -NCHW: log binomial distribution logbinomial(p;t)
"""
if x.ndim == 3:
x = x.unsqueeze(1) # make it nchw
one_minus_x = torch.clamp(1 - x, eps, 1)
x = torch.clamp(x, eps, 1)
y = log_binom(self.K_minus_1, self.k_idx) + self.k_idx * \
torch.log(x) + (self.K - 1 - self.k_idx) * torch.log(one_minus_x)
return self.act(y/t, dim=1) | Compute log binomial distribution for x
Args:
x (torch.Tensor - NCHW): probabilities
t (float, torch.Tensor - NCHW, optional): Temperature of distribution. Defaults to 1..
eps (float, optional): Small number for numerical stability. Defaults to 1e-4.
Returns:
torch.Tensor -NCHW: log binomial distribution logbinomial(p;t) | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/dist_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/dist_layers.py | Apache-2.0 |
def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax):
"""Conditional Log Binomial distribution
Args:
in_features (int): number of input channels in main feature
condition_dim (int): number of input channels in condition feature
n_classes (int, optional): Number of classes. Defaults to 256.
bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.
p_eps (float, optional): small eps value. Defaults to 1e-4.
max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.
min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.
"""
super().__init__()
self.p_eps = p_eps
self.max_temp = max_temp
self.min_temp = min_temp
self.log_binomial_transform = LogBinomial(n_classes, act=act)
bottleneck = (in_features + condition_dim) // bottleneck_factor
self.mlp = nn.Sequential(
nn.Conv2d(in_features + condition_dim, bottleneck,
kernel_size=1, stride=1, padding=0),
nn.GELU(),
# 2 for p linear norm, 2 for t linear norm
nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0),
nn.Softplus()
) | Conditional Log Binomial distribution
Args:
in_features (int): number of input channels in main feature
condition_dim (int): number of input channels in condition feature
n_classes (int, optional): Number of classes. Defaults to 256.
bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.
p_eps (float, optional): small eps value. Defaults to 1e-4.
max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.
min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/dist_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/dist_layers.py | Apache-2.0 |
def forward(self, x, cond):
"""Forward pass
Args:
x (torch.Tensor - NCHW): Main feature
cond (torch.Tensor - NCHW): condition feature
Returns:
torch.Tensor: Output log binomial distribution
"""
pt = self.mlp(torch.concat((x, cond), dim=1))
p, t = pt[:, :2, ...], pt[:, 2:, ...]
p = p + self.p_eps
p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...])
t = t + self.p_eps
t = t[:, 0, ...] / (t[:, 0, ...] + t[:, 1, ...])
t = t.unsqueeze(1)
t = (self.max_temp - self.min_temp) * t + self.min_temp
return self.log_binomial_transform(p, t) | Forward pass
Args:
x (torch.Tensor - NCHW): Main feature
cond (torch.Tensor - NCHW): condition feature
Returns:
torch.Tensor: Output log binomial distribution | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/dist_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/dist_layers.py | Apache-2.0 |
def exp_attractor(dx, alpha: float = 300, gamma: int = 2):
"""Exponential attractor: dc = exp(-alpha*|dx|^gamma) * dx , where dx = a - c, a = attractor point, c = bin center, dc = shift in bin centermmary for exp_attractor
Args:
dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300.
gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2.
Returns:
torch.Tensor : Delta shifts - dc; New bin centers = Old bin centers + dc
"""
return torch.exp(-alpha*(torch.abs(dx)**gamma)) * (dx) | Exponential attractor: dc = exp(-alpha*|dx|^gamma) * dx, where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
Args:
dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300.
gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2.
Returns:
torch.Tensor : Delta shifts - dc; New bin centers = Old bin centers + dc | exp_attractor | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/attractor.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/attractor.py | Apache-2.0 |
def inv_attractor(dx, alpha: float = 300, gamma: int = 2):
"""Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
This is the default one according to the accompanying paper.
Args:
dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300.
gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2.
Returns:
torch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc
"""
return dx.div(1+alpha*dx.pow(gamma)) | Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
This is the default one according to the accompanying paper.
Args:
dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300.
gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2.
Returns:
torch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc | inv_attractor | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/attractor.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/attractor.py | Apache-2.0 |
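Tiny numeric comparison of the two shift functions defined above, using the default alpha=300, gamma=2: near an attractor the shift approaches dx itself, and far away both decay, with the exponential form decaying much faster.

```python
import torch

dx = torch.tensor([0.01, 0.1, 0.5])          # distances from attractor point to bin center
print(torch.exp(-300 * dx.abs() ** 2) * dx)  # exp attractor: ~[0.0097, 0.0050, 0.0000]
print(dx / (1 + 300 * dx ** 2))              # inv attractor: ~[0.0097, 0.0250, 0.0066]
```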
def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,
alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):
"""
Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)
"""
super().__init__()
self.n_attractors = n_attractors
self.n_bins = n_bins
self.min_depth = min_depth
self.max_depth = max_depth
self.alpha = alpha
self.gamma = gamma
self.kind = kind
self.attractor_type = attractor_type
self.memory_efficient = memory_efficient
self._net = nn.Sequential(
nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
nn.ReLU(inplace=True),
nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm
nn.ReLU(inplace=True)
) | Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth) | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/attractor.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/attractor.py | Apache-2.0 |
def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
"""
Args:
x (torch.Tensor) : feature block; shape - n, c, h, w
b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w
Returns:
tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w
"""
if prev_b_embedding is not None:
if interpolate:
prev_b_embedding = nn.functional.interpolate(
prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
x = x + prev_b_embedding
A = self._net(x)
eps = 1e-3
A = A + eps
n, c, h, w = A.shape
A = A.view(n, self.n_attractors, 2, h, w)
A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w
A_normed = A[:, :, 0, ...] # n, na, h, w; note: this overwrites the normalized tensor above, so only the raw first of the two channels is used downstream
b_prev = nn.functional.interpolate(
b_prev, (h, w), mode='bilinear', align_corners=True)
b_centers = b_prev
if self.attractor_type == 'exp':
dist = exp_attractor
else:
dist = inv_attractor
if not self.memory_efficient:
func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]
# .shape N, nbins, h, w
delta_c = func(dist(A_normed.unsqueeze(
2) - b_centers.unsqueeze(1)), dim=1)
else:
delta_c = torch.zeros_like(b_centers, device=b_centers.device)
for i in range(self.n_attractors):
# .shape N, nbins, h, w
delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers)
if self.kind == 'mean':
delta_c = delta_c / self.n_attractors
b_new_centers = b_centers + delta_c
B_centers = (self.max_depth - self.min_depth) * \
b_new_centers + self.min_depth
B_centers, _ = torch.sort(B_centers, dim=1)
B_centers = torch.clip(B_centers, self.min_depth, self.max_depth)
return b_new_centers, B_centers | Args:
x (torch.Tensor) : feature block; shape - n, c, h, w
b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w
Returns:
tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/attractor.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/attractor.py | Apache-2.0 |
def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,
alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):
"""
Attractor layer for bin centers. Bin centers are unbounded
"""
super().__init__()
self.n_attractors = n_attractors
self.n_bins = n_bins
self.min_depth = min_depth
self.max_depth = max_depth
self.alpha = alpha
self.gamma = gamma
self.kind = kind
self.attractor_type = attractor_type
self.memory_efficient = memory_efficient
self._net = nn.Sequential(
nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
nn.ReLU(inplace=True),
nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0),
nn.Softplus()
) | Attractor layer for bin centers. Bin centers are unbounded | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/attractor.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/attractor.py | Apache-2.0 |
def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
"""
Args:
x (torch.Tensor) : feature block; shape - n, c, h, w
b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w
Returns:
tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. Two outputs just to keep the API consistent with the normed version
"""
if prev_b_embedding is not None:
if interpolate:
prev_b_embedding = nn.functional.interpolate(
prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
x = x + prev_b_embedding
A = self._net(x)
n, c, h, w = A.shape
b_prev = nn.functional.interpolate(
b_prev, (h, w), mode='bilinear', align_corners=True)
b_centers = b_prev
if self.attractor_type == 'exp':
dist = exp_attractor
else:
dist = inv_attractor
if not self.memory_efficient:
func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]
# .shape N, nbins, h, w
delta_c = func(
dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1)
else:
delta_c = torch.zeros_like(b_centers, device=b_centers.device)
for i in range(self.n_attractors):
delta_c += dist(A[:, i, ...].unsqueeze(1) -
b_centers) # .shape N, nbins, h, w
if self.kind == 'mean':
delta_c = delta_c / self.n_attractors
b_new_centers = b_centers + delta_c
B_centers = b_new_centers
return b_new_centers, B_centers | Args:
x (torch.Tensor) : feature block; shape - n, c, h, w
b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w
Returns:
tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. Two outputs just to keep the API consistent with the normed version | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/attractor.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/attractor.py | Apache-2.0 |
def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):
"""Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval.
Args:
in_features (int): input channels
n_bins (int, optional): Number of bin centers. Defaults to 16.
mlp_dim (int, optional): Hidden dimension. Defaults to 256.
min_depth (float, optional): Min depth value. Defaults to 1e-3.
max_depth (float, optional): Max depth value. Defaults to 10.
"""
super().__init__()
self.version = "1_1"
self.min_depth = min_depth
self.max_depth = max_depth
self._net = nn.Sequential(
nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
nn.ReLU(inplace=True),
nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),
nn.ReLU(inplace=True)
) | Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval.
Args:
in_features (int): input channels
n_bins (int, optional): Number of bin centers. Defaults to 16.
mlp_dim (int, optional): Hidden dimension. Defaults to 256.
min_depth (float, optional): Min depth value. Defaults to 1e-3.
max_depth (float, optional): Max depth value. Defaults to 10. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/localbins_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/localbins_layers.py | Apache-2.0 |
def forward(self, x):
"""
Returns tensor of bin_width vectors (centers). One vector b for every pixel
"""
B = self._net(x)
eps = 1e-3
B = B + eps
B_widths_normed = B / B.sum(dim=1, keepdim=True)
B_widths = (self.max_depth - self.min_depth) * \
B_widths_normed # .shape NCHW
# pad has the form (left, right, top, bottom, front, back)
B_widths = nn.functional.pad(
B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth)
B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW
B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...])
return B_widths_normed, B_centers | Returns tensor of bin_width vectors (centers). One vector b for every pixel | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/localbins_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/localbins_layers.py | Apache-2.0 |
def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):
"""Bin center regressor network. Bin centers are unbounded
Args:
in_features (int): input channels
n_bins (int, optional): Number of bin centers. Defaults to 16.
mlp_dim (int, optional): Hidden dimension. Defaults to 256.
min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
"""
super().__init__()
self.version = "1_1"
self._net = nn.Sequential(
nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
nn.ReLU(inplace=True),
nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),
nn.Softplus()
) | Bin center regressor network. Bin centers are unbounded
Args:
in_features (int): input channels
n_bins (int, optional): Number of bin centers. Defaults to 16.
mlp_dim (int, optional): Hidden dimension. Defaults to 256.
min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor) | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/localbins_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/localbins_layers.py | Apache-2.0 |
def forward(self, x):
"""
Returns tensor of bin_width vectors (centers). One vector b for every pixel
"""
B_centers = self._net(x)
return B_centers, B_centers | Returns tensor of bin_width vectors (centers). One vector b for every pixel | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/localbins_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/localbins_layers.py | Apache-2.0 |
def __init__(self, in_features, out_features, mlp_dim=128):
"""Projector MLP
Args:
in_features (int): input channels
out_features (int): output channels
mlp_dim (int, optional): hidden dimension. Defaults to 128.
"""
super().__init__()
self._net = nn.Sequential(
nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
nn.ReLU(inplace=True),
nn.Conv2d(mlp_dim, out_features, 1, 1, 0),
) | Projector MLP
Args:
in_features (int): input channels
out_features (int): output channels
mlp_dim (int, optional): hidden dimension. Defaults to 128. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/localbins_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/localbins_layers.py | Apache-2.0 |
def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
"""
x : feature block; shape - n, c, h, w
b_prev : previous bin widths normed; shape - n, prev_nbins, h, w
"""
if prev_b_embedding is not None:
if interpolate:
prev_b_embedding = nn.functional.interpolate(prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
x = x + prev_b_embedding
S = self._net(x)
eps = 1e-3
S = S + eps
n, c, h, w = S.shape
S = S.view(n, self.prev_nbins, self.split_factor, h, w)
S_normed = S / S.sum(dim=2, keepdim=True) # fractional splits
b_prev = nn.functional.interpolate(b_prev, (h,w), mode='bilinear', align_corners=True)
b_prev = b_prev / b_prev.sum(dim=1, keepdim=True) # renormalize for guarantees
# print(b_prev.shape, S_normed.shape)
# if is_for_query:(1).expand(-1, b_prev.size(0)//n, -1, -1, -1, -1).flatten(0,1) # TODO ? can replace all this with a single torch.repeat?
b = b_prev.unsqueeze(2) * S_normed
b = b.flatten(1,2) # .shape n, prev_nbins * split_factor, h, w
# calculate bin centers for loss calculation
B_widths = (self.max_depth - self.min_depth) * b # .shape N, nprev * splitfactor, H, W
# pad has the form (left, right, top, bottom, front, back)
B_widths = nn.functional.pad(B_widths, (0,0,0,0,1,0), mode='constant', value=self.min_depth)
B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW
B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:,1:,...])
return b, B_centers | x : feature block; shape - n, c, h, w
b_prev : previous bin widths normed; shape - n, prev_nbins, h, w | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/layers/localbins_layers.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/layers/localbins_layers.py | Apache-2.0 |
def denormalize(x):
"""Reverses the imagenet normalization applied to the input.
Args:
x (torch.Tensor - shape(N,3,H,W)): input tensor
Returns:
torch.Tensor - shape(N,3,H,W): Denormalized input
"""
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device)
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device)
return x * std + mean | Reverses the imagenet normalization applied to the input.
Args:
x (torch.Tensor - shape(N,3,H,W)): input tensor
Returns:
torch.Tensor - shape(N,3,H,W): Denormalized input | denormalize | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/base_models/midas.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/base_models/midas.py | Apache-2.0 |
def __init__(
self,
width,
height,
resize_target=True,
keep_aspect_ratio=False,
ensure_multiple_of=1,
resize_method="lower_bound",
):
"""Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height are constrained to be a multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound".
"""
print("Params passed to Resize transform:")
print("\twidth: ", width)
print("\theight: ", height)
print("\tresize_target: ", resize_target)
print("\tkeep_aspect_ratio: ", keep_aspect_ratio)
print("\tensure_multiple_of: ", ensure_multiple_of)
print("\tresize_method: ", resize_method)
self.__width = width
self.__height = height
self.__keep_aspect_ratio = keep_aspect_ratio
self.__multiple_of = ensure_multiple_of
self.__resize_method = resize_method | Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height are constrained to be a multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound". | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/base_models/midas.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/base_models/midas.py | Apache-2.0 |
def denormalize(x):
"""Reverses the imagenet normalization applied to the input.
Args:
x (torch.Tensor - shape(N,3,H,W)): input tensor
Returns:
torch.Tensor - shape(N,3,H,W): Denormalized input
"""
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device)
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device)
return x * std + mean | Reverses the imagenet normalization applied to the input.
Args:
x (torch.Tensor - shape(N,3,H,W)): input tensor
Returns:
torch.Tensor - shape(N,3,H,W): Denormalized input | denormalize | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/base_models/depth_anything.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/base_models/depth_anything.py | Apache-2.0 |
def __init__(
self,
width,
height,
resize_target=True,
keep_aspect_ratio=False,
ensure_multiple_of=1,
resize_method="lower_bound",
):
"""Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height are constrained to be a multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound".
"""
print("Params passed to Resize transform:")
print("\twidth: ", width)
print("\theight: ", height)
print("\tresize_target: ", resize_target)
print("\tkeep_aspect_ratio: ", keep_aspect_ratio)
print("\tensure_multiple_of: ", ensure_multiple_of)
print("\tresize_method: ", resize_method)
self.__width = width
self.__height = height
self.__keep_aspect_ratio = keep_aspect_ratio
self.__multiple_of = ensure_multiple_of
self.__resize_method = resize_method | Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height are constrained to be a multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound". | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/base_models/depth_anything.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/base_models/depth_anything.py | Apache-2.0 |
def __init__(self, features, activation, bn):
"""Init.
Args:
features (int): number of features
"""
super().__init__()
self.bn = bn
self.groups=1
self.conv1 = nn.Conv2d(
features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
)
self.conv2 = nn.Conv2d(
features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
)
if self.bn==True:
self.bn1 = nn.BatchNorm2d(features)
self.bn2 = nn.BatchNorm2d(features)
self.activation = activation
self.skip_add = nn.quantized.FloatFunctional() | Init.
Args:
features (int): number of features | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/base_models/dpt_dinov2/blocks.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/base_models/dpt_dinov2/blocks.py | Apache-2.0 |
def forward(self, x):
"""Forward pass.
Args:
x (tensor): input
Returns:
tensor: output
"""
out = self.activation(x)
out = self.conv1(out)
if self.bn==True:
out = self.bn1(out)
out = self.activation(out)
out = self.conv2(out)
if self.bn==True:
out = self.bn2(out)
if self.groups > 1:
out = self.conv_merge(out)
return self.skip_add.add(out, x) | Forward pass.
Args:
x (tensor): input
Returns:
tensor: output | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/base_models/dpt_dinov2/blocks.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/base_models/dpt_dinov2/blocks.py | Apache-2.0 |
def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
"""Init.
Args:
features (int): number of features
"""
super(FeatureFusionBlock, self).__init__()
self.deconv = deconv
self.align_corners = align_corners
self.groups=1
self.expand = expand
out_features = features
if self.expand==True:
out_features = features//2
self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
self.skip_add = nn.quantized.FloatFunctional()
self.size=size | Init.
Args:
features (int): number of features | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/base_models/dpt_dinov2/blocks.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/base_models/dpt_dinov2/blocks.py | Apache-2.0 |
def forward(self, *xs, size=None):
"""Forward pass.
Returns:
tensor: output
"""
output = xs[0]
if len(xs) == 2:
res = self.resConfUnit1(xs[1])
output = self.skip_add.add(output, res)
output = self.resConfUnit2(output)
if (size is None) and (self.size is None):
modifier = {"scale_factor": 2}
elif size is None:
modifier = {"size": self.size}
else:
modifier = {"size": size}
output = nn.functional.interpolate(
output, **modifier, mode="bilinear", align_corners=self.align_corners
)
output = self.out_conv(output)
return output | Forward pass.
Returns:
tensor: output | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/base_models/dpt_dinov2/blocks.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/base_models/dpt_dinov2/blocks.py | Apache-2.0 |
def __init__(self, core, bin_conf, bin_centers_type="softplus", bin_embedding_dim=128,
n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp',
min_temp=5, max_temp=50,
memory_efficient=False, train_midas=True,
is_midas_pretrained=True, midas_lr_factor=1, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs):
"""ZoeDepthNK model. This is the version of ZoeDepth that has two metric heads and uses a learned router to route to experts.
Args:
core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
bin_conf (List[dict]): A list of dictionaries that contain the bin configuration for each metric head. Each dictionary should contain the following keys:
"name" (str, typically same as the dataset name), "n_bins" (int), "min_depth" (float), "max_depth" (float)
The length of this list determines the number of metric heads.
bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers.
For "softplus", softplus activation is used and thus are unbounded. Defaults to "normed".
bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'.
min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
memory_efficient (bool, optional): Whether to use the memory-efficient version of the attractor layers. The memory-efficient version is slower but is recommended in case of multiple metric heads in order to save GPU memory. Defaults to False.
train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True.
is_midas_pretrained (bool, optional): Is "core" pretrained? Defaults to True.
midas_lr_factor (int, optional): Learning rate reduction factor for the base midas model except its encoder and positional encodings. Defaults to 1.
encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10.
pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10.
"""
super().__init__()
self.core = core
self.bin_conf = bin_conf
self.min_temp = min_temp
self.max_temp = max_temp
self.memory_efficient = memory_efficient
self.train_midas = train_midas
self.is_midas_pretrained = is_midas_pretrained
self.midas_lr_factor = midas_lr_factor
self.encoder_lr_factor = encoder_lr_factor
self.pos_enc_lr_factor = pos_enc_lr_factor
self.inverse_midas = inverse_midas
N_MIDAS_OUT = 32
btlnck_features = self.core.output_channels[0]
num_out_features = self.core.output_channels[1:]
# self.scales = [16, 8, 4, 2] # spatial scale factors
self.conv2 = nn.Conv2d(
btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0)
# Transformer classifier on the bottleneck
self.patch_transformer = PatchTransformerEncoder(
btlnck_features, 1, 128, use_class_token=True)
self.mlp_classifier = nn.Sequential(
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, 2)
)
if bin_centers_type == "normed":
SeedBinRegressorLayer = SeedBinRegressor
Attractor = AttractorLayer
elif bin_centers_type == "softplus":
SeedBinRegressorLayer = SeedBinRegressorUnnormed
Attractor = AttractorLayerUnnormed
elif bin_centers_type == "hybrid1":
SeedBinRegressorLayer = SeedBinRegressor
Attractor = AttractorLayerUnnormed
elif bin_centers_type == "hybrid2":
SeedBinRegressorLayer = SeedBinRegressorUnnormed
Attractor = AttractorLayer
else:
raise ValueError(
"bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'")
self.bin_centers_type = bin_centers_type
# We have bins for each bin conf.
# Create a map (ModuleDict) of 'name' -> seed_bin_regressor
self.seed_bin_regressors = nn.ModuleDict(
{conf['name']: SeedBinRegressorLayer(btlnck_features, conf["n_bins"], mlp_dim=bin_embedding_dim//2, min_depth=conf["min_depth"], max_depth=conf["max_depth"])
for conf in bin_conf}
)
self.seed_projector = Projector(
btlnck_features, bin_embedding_dim, mlp_dim=bin_embedding_dim//2)
self.projectors = nn.ModuleList([
Projector(num_out, bin_embedding_dim, mlp_dim=bin_embedding_dim//2)
for num_out in num_out_features
])
# Create a map (ModuleDict) of 'name' -> attractors (ModuleList)
self.attractors = nn.ModuleDict(
{conf['name']: nn.ModuleList([
Attractor(bin_embedding_dim, n_attractors[i],
mlp_dim=bin_embedding_dim, alpha=attractor_alpha,
gamma=attractor_gamma, kind=attractor_kind,
attractor_type=attractor_type, memory_efficient=memory_efficient,
min_depth=conf["min_depth"], max_depth=conf["max_depth"])
for i in range(len(n_attractors))
])
for conf in bin_conf}
)
last_in = N_MIDAS_OUT
# conditional log binomial for each bin conf
self.conditional_log_binomial = nn.ModuleDict(
{conf['name']: ConditionalLogBinomial(last_in, bin_embedding_dim, conf['n_bins'], bottleneck_factor=4, min_temp=self.min_temp, max_temp=self.max_temp)
for conf in bin_conf}
) | ZoeDepthNK model. This is the version of ZoeDepth that has two metric heads and uses a learned router to route to experts.
Args:
core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
bin_conf (List[dict]): A list of dictionaries that contain the bin configuration for each metric head. Each dictionary should contain the following keys:
"name" (str, typically same as the dataset name), "n_bins" (int), "min_depth" (float), "max_depth" (float)
The length of this list determines the number of metric heads.
bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers.
For "softplus", softplus activation is used and thus are unbounded. Defaults to "normed".
bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'.
min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
memory_efficient (bool, optional): Whether to use the memory-efficient version of the attractor layers. The memory-efficient version is slower but is recommended in case of multiple metric heads in order to save GPU memory. Defaults to False.
train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True.
is_midas_pretrained (bool, optional): Is "core" pretrained? Defaults to True.
midas_lr_factor (int, optional): Learning rate reduction factor for the base midas model except its encoder and positional encodings. Defaults to 1.
encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10.
pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | Apache-2.0 |
def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs):
"""
Args:
x (torch.Tensor): Input image tensor of shape (B, C, H, W). Assumes all images are from the same domain.
return_final_centers (bool, optional): Whether to return the final centers of the attractors. Defaults to False.
denorm (bool, optional): Whether to denormalize the input image. Defaults to False.
return_probs (bool, optional): Whether to return the probabilities of the bins. Defaults to False.
Returns:
dict: Dictionary of outputs with keys:
- "rel_depth": Relative depth map of shape (B, 1, H, W)
- "metric_depth": Metric depth map of shape (B, 1, H, W)
- "domain_logits": Domain logits of shape (B, 2)
- "bin_centers": Bin centers of shape (B, N, H, W). Present only if return_final_centers is True
- "probs": Bin probabilities of shape (B, N, H, W). Present only if return_probs is True
"""
b, c, h, w = x.shape
self.orig_input_width = w
self.orig_input_height = h
rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True)
outconv_activation = out[0]
btlnck = out[1]
x_blocks = out[2:]
x_d0 = self.conv2(btlnck)
x = x_d0
# Predict which path to take
embedding = self.patch_transformer(x)[0] # N, E
domain_logits = self.mlp_classifier(embedding) # N, 2
domain_vote = torch.softmax(domain_logits.sum(
dim=0, keepdim=True), dim=-1) # 1, 2
# Get the path
bin_conf_name = ["nyu", "kitti"][torch.argmax(
domain_vote, dim=-1).squeeze().item()]
try:
conf = [c for c in self.bin_conf if c.name == bin_conf_name][0]
except IndexError:
raise ValueError(
f"bin_conf_name {bin_conf_name} not found in bin_confs")
min_depth = conf['min_depth']
max_depth = conf['max_depth']
seed_bin_regressor = self.seed_bin_regressors[bin_conf_name]
_, seed_b_centers = seed_bin_regressor(x)
if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2':
b_prev = (seed_b_centers - min_depth)/(max_depth - min_depth)
else:
b_prev = seed_b_centers
prev_b_embedding = self.seed_projector(x)
attractors = self.attractors[bin_conf_name]
for projector, attractor, x in zip(self.projectors, attractors, x_blocks):
b_embedding = projector(x)
b, b_centers = attractor(
b_embedding, b_prev, prev_b_embedding, interpolate=True)
b_prev = b
prev_b_embedding = b_embedding
last = outconv_activation
b_centers = nn.functional.interpolate(
b_centers, last.shape[-2:], mode='bilinear', align_corners=True)
b_embedding = nn.functional.interpolate(
b_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
clb = self.conditional_log_binomial[bin_conf_name]
x = clb(last, b_embedding)
# Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor
# print(x.shape, b_centers.shape)
# b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True)
out = torch.sum(x * b_centers, dim=1, keepdim=True)
output = dict(domain_logits=domain_logits, metric_depth=out)
if return_final_centers or return_probs:
output['bin_centers'] = b_centers
if return_probs:
output['probs'] = x
return output | Args:
x (torch.Tensor): Input image tensor of shape (B, C, H, W). Assumes all images are from the same domain.
return_final_centers (bool, optional): Whether to return the final centers of the attractors. Defaults to False.
denorm (bool, optional): Whether to denormalize the input image. Defaults to False.
return_probs (bool, optional): Whether to return the probabilities of the bins. Defaults to False.
Returns:
dict: Dictionary of outputs with keys:
- "rel_depth": Relative depth map of shape (B, 1, H, W)
- "metric_depth": Metric depth map of shape (B, 1, H, W)
- "domain_logits": Domain logits of shape (B, 2)
- "bin_centers": Bin centers of shape (B, N, H, W). Present only if return_final_centers is True
- "probs": Bin probabilities of shape (B, N, H, W). Present only if return_probs is True | forward | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | Apache-2.0 |
def get_lr_params(self, lr):
"""
Learning rate configuration for different layers of the model
Args:
lr (float) : Base learning rate
Returns:
list : list of parameters to optimize and their learning rates, in the format required by torch optimizers.
"""
param_conf = []
if self.train_midas:
def get_rel_pos_params():
for name, p in self.core.core.pretrained.named_parameters():
# if "relative_position" in name:
if "pos_embed" in name:
yield p
def get_enc_params_except_rel_pos():
for name, p in self.core.core.pretrained.named_parameters():
# if "relative_position" not in name:
if "pos_embed" not in name:
yield p
encoder_params = get_enc_params_except_rel_pos()
rel_pos_params = get_rel_pos_params()
# midas_params = self.core.core.scratch.parameters()
midas_params = self.core.core.depth_head.parameters()
midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0
param_conf.extend([
{'params': encoder_params, 'lr': lr / self.encoder_lr_factor},
{'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor},
{'params': midas_params, 'lr': lr / midas_lr_factor}
])
remaining_modules = []
for name, child in self.named_children():
if name != 'core':
remaining_modules.append(child)
remaining_params = itertools.chain(
*[child.parameters() for child in remaining_modules])
param_conf.append({'params': remaining_params, 'lr': lr})
return param_conf | Learning rate configuration for different layers of the model
Args:
lr (float) : Base learning rate
Returns:
list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. | get_lr_params | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | Apache-2.0 |
def get_conf_parameters(self, conf_name):
"""
Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration
"""
params = []
for name, child in self.named_children():
if isinstance(child, nn.ModuleDict):
for bin_conf_name, module in child.items():
if bin_conf_name == conf_name:
params += list(module.parameters())
return params | Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration | get_conf_parameters | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | Apache-2.0 |
def freeze_conf(self, conf_name):
"""
Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration
"""
for p in self.get_conf_parameters(conf_name):
p.requires_grad = False | Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration | freeze_conf | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | Apache-2.0 |
def unfreeze_conf(self, conf_name):
"""
Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration
"""
for p in self.get_conf_parameters(conf_name):
p.requires_grad = True | Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration | unfreeze_conf | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | Apache-2.0 |
def freeze_all_confs(self):
"""
Freezes all the parameters of all the ModuleDicts children
"""
for name, child in self.named_children():
if isinstance(child, nn.ModuleDict):
for bin_conf_name, module in child.items():
for p in module.parameters():
p.requires_grad = False | Freezes all the parameters of all the ModuleDicts children | freeze_all_confs | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py | Apache-2.0 |
def get_trainer(config):
"""Builds and returns a trainer based on the config.
Args:
config (dict): the config dict (typically constructed using utils.config.get_config)
config.trainer (str): the name of the trainer to use. The module named "{config.trainer}_trainer" must exist in trainers root module
Raises:
ValueError: If the specified trainer does not exist under trainers/ folder
Returns:
Trainer (inherited from zoedepth.trainers.BaseTrainer): The Trainer object
"""
assert "trainer" in config and config.trainer is not None and config.trainer != '', "Trainer not specified. Config: {0}".format(
config)
try:
Trainer = getattr(import_module(
f"zoedepth.trainers.{config.trainer}_trainer"), 'Trainer')
except ModuleNotFoundError as e:
raise ValueError(f"Trainer {config.trainer}_trainer not found.") from e
return Trainer | Builds and returns a trainer based on the config.
Args:
config (dict): the config dict (typically constructed using utils.config.get_config)
config.trainer (str): the name of the trainer to use. The module named "{config.trainer}_trainer" must exist in trainers root module
Raises:
ValueError: If the specified trainer does not exist under trainers/ folder
Returns:
Trainer (inherited from zoedepth.trainers.BaseTrainer): The Trainer object | get_trainer | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/trainers/builder.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/trainers/builder.py | Apache-2.0 |
def __call__(self, prob, gt):
"""
:param prob: ordinal regression probability, N x 2*Ord Num x H x W, torch.Tensor
:param gt: depth ground truth, N x H x W, torch.Tensor
:return: loss: loss value, torch.float
"""
# N, C, H, W = prob.shape
valid_mask = gt > 0.
ord_label, mask = self._create_ord_label(gt)
# print("prob shape: {}, ord label shape: {}".format(prob.shape, ord_label.shape))
entropy = -prob * ord_label
loss = torch.sum(entropy, dim=1)[valid_mask.squeeze(1)]
return loss.mean() | :param prob: ordinal regression probability, N x 2*Ord Num x H x W, torch.Tensor
:param gt: depth ground truth, N x H x W, torch.Tensor
:return: loss: loss value, torch.float | __call__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/trainers/loss.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/trainers/loss.py | Apache-2.0 |
def _dequantize_depth(self, depth):
"""
Inverse of quantization
depth : NCHW -> N1HW
""" | Inverse of quantization
depth : NCHW -> N1HW | _dequantize_depth | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/trainers/loss.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/trainers/loss.py | Apache-2.0 |
def train_on_batch(self, batch, train_step):
"""
Expects a batch of images and depth as input
batch["image"].shape : batch_size, c, h, w
batch["depth"].shape : batch_size, 1, h, w
Assumes all images in a batch are from the same dataset
"""
images, depths_gt = batch['image'].to(
self.device), batch['depth'].to(self.device)
# batch['dataset'] is a tensor of strings, all valued either 'nyu' or 'kitti'. Labels: nyu -> 0, kitti -> 1
dataset = batch['dataset'][0]
# Convert to 0s or 1s
domain_labels = torch.Tensor([dataset == 'kitti' for _ in range(
images.size(0))]).to(torch.long).to(self.device)
# m = self.model.module if self.config.multigpu else self.model
b, c, h, w = images.size()
mask = batch["mask"].to(self.device).to(torch.bool)
losses = {}
with amp.autocast(enabled=self.config.use_amp):
output = self.model(images)
pred_depths = output['metric_depth']
domain_logits = output['domain_logits']
l_si, pred = self.silog_loss(
pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True)
loss = self.config.w_si * l_si
losses[self.silog_loss.name] = l_si
if self.config.w_grad > 0:
l_grad = self.grad_loss(pred, depths_gt, mask=mask)
loss = loss + self.config.w_grad * l_grad
losses[self.grad_loss.name] = l_grad
else:
l_grad = torch.Tensor([0])
if self.config.w_domain > 0:
l_domain = self.domain_classifier_loss(
domain_logits, domain_labels)
loss = loss + self.config.w_domain * l_domain
losses["DomainLoss"] = l_domain
else:
l_domain = torch.Tensor([0.])
self.scaler.scale(loss).backward()
if self.config.clip_grad > 0:
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(
self.model.parameters(), self.config.clip_grad)
self.scaler.step(self.optimizer)
if self.should_log and self.step > 1 and (self.step % int(self.config.log_images_every * self.iters_per_epoch)) == 0:
depths_gt[torch.logical_not(mask)] = -99
self.log_images(rgb={"Input": images[0, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred[0]}, prefix="Train",
min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth'])
self.scaler.update()
self.optimizer.zero_grad(set_to_none=True)
return losses | Expects a batch of images and depth as input
batch["image"].shape : batch_size, c, h, w
batch["depth"].shape : batch_size, 1, h, w
Assumes all images in a batch are from the same dataset | train_on_batch | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/trainers/zoedepth_nk_trainer.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/trainers/zoedepth_nk_trainer.py | Apache-2.0 |
def train_on_batch(self, batch, train_step):
"""
Expects a batch of images and depth as input
batch["image"].shape : batch_size, c, h, w
batch["depth"].shape : batch_size, 1, h, w
"""
images, depths_gt = batch['image'].to(
self.device), batch['depth'].to(self.device)
dataset = batch['dataset'][0]
b, c, h, w = images.size()
mask = batch["mask"].to(self.device).to(torch.bool)
losses = {}
with amp.autocast(enabled=self.config.use_amp):
output = self.model(images)
pred_depths = output['metric_depth']
l_si, pred = self.silog_loss(
pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True)
loss = self.config.w_si * l_si
losses[self.silog_loss.name] = l_si
if self.config.w_grad > 0:
l_grad = self.grad_loss(pred, depths_gt, mask=mask)
loss = loss + self.config.w_grad * l_grad
losses[self.grad_loss.name] = l_grad
else:
l_grad = torch.Tensor([0])
self.scaler.scale(loss).backward()
if self.config.clip_grad > 0:
self.scaler.unscale_(self.optimizer)
nn.utils.clip_grad_norm_(
self.model.parameters(), self.config.clip_grad)
self.scaler.step(self.optimizer)
if self.should_log and (self.step % int(self.config.log_images_every * self.iters_per_epoch)) == 0:
# -99 is treated as invalid depth in the log_images function and is colored grey.
depths_gt[torch.logical_not(mask)] = -99
self.log_images(rgb={"Input": images[0, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred[0]}, prefix="Train",
min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth'])
if self.config.get("log_rel", False):
self.log_images(
scalar_field={"RelPred": output["relative_depth"][0]}, prefix="TrainRel")
self.scaler.update()
self.optimizer.zero_grad()
return losses | Expects a batch of images and depth as input
batch["image"].shape : batch_size, c, h, w
batch["depth"].shape : batch_size, 1, h, w | train_on_batch | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/trainers/zoedepth_trainer.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/trainers/zoedepth_trainer.py | Apache-2.0 |
def __init__(self, config, model, train_loader, test_loader=None, device=None):
""" Base Trainer class for training a model."""
self.config = config
self.metric_criterion = "abs_rel"
if device is None:
device = torch.device(
'cuda') if torch.cuda.is_available() else torch.device('cpu')
self.device = device
self.model = model
self.train_loader = train_loader
self.test_loader = test_loader
self.optimizer = self.init_optimizer()
self.scheduler = self.init_scheduler() | Base Trainer class for training a model. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/trainers/base_trainer.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/trainers/base_trainer.py | Apache-2.0 |
def __init__(self, config, mode, device='cpu', transform=None, **kwargs):
"""
Data loader for depth datasets
Args:
config (dict): Config dictionary. Refer to utils/config.py
mode (str): "train" or "online_eval"
device (str, optional): Device to load the data on. Defaults to 'cpu'.
transform (torchvision.transforms, optional): Transform to apply to the data. Defaults to None.
"""
self.config = config
if config.dataset == 'ibims':
self.data = get_ibims_loader(config, batch_size=1, num_workers=1)
return
if config.dataset == 'sunrgbd':
self.data = get_sunrgbd_loader(
data_dir_root=config.sunrgbd_root, batch_size=1, num_workers=1)
return
if config.dataset == 'diml_indoor':
self.data = get_diml_indoor_loader(
data_dir_root=config.diml_indoor_root, batch_size=1, num_workers=1)
return
if config.dataset == 'diml_outdoor':
self.data = get_diml_outdoor_loader(
data_dir_root=config.diml_outdoor_root, batch_size=1, num_workers=1)
return
if "diode" in config.dataset:
self.data = get_diode_loader(
config[config.dataset+"_root"], batch_size=1, num_workers=1)
return
if config.dataset == 'hypersim_test':
self.data = get_hypersim_loader(
config.hypersim_test_root, batch_size=1, num_workers=1)
return
if config.dataset == 'vkitti':
self.data = get_vkitti_loader(
config.vkitti_root, batch_size=1, num_workers=1)
return
if config.dataset == 'vkitti2':
self.data = get_vkitti2_loader(
config.vkitti2_root, batch_size=1, num_workers=1)
return
if config.dataset == 'ddad':
self.data = get_ddad_loader(config.ddad_root, resize_shape=(
352, 1216), batch_size=1, num_workers=1)
return
img_size = self.config.get("img_size", None)
img_size = img_size if self.config.get(
"do_input_resize", False) else None
if transform is None:
transform = preprocessing_transforms(mode, size=img_size)
if mode == 'train':
Dataset = DataLoadPreprocess
self.training_samples = Dataset(
config, mode, transform=transform, device=device)
if config.distributed:
self.train_sampler = torch.utils.data.distributed.DistributedSampler(
self.training_samples)
else:
self.train_sampler = None
self.data = DataLoader(self.training_samples,
batch_size=config.batch_size,
shuffle=(self.train_sampler is None),
num_workers=config.workers,
pin_memory=True,
persistent_workers=True,
# prefetch_factor=2,
sampler=self.train_sampler)
elif mode == 'online_eval':
self.testing_samples = DataLoadPreprocess(
config, mode, transform=transform)
if config.distributed: # redundant. here only for readability and to be more explicit
# Give whole test set to all processes (and report evaluation only on one) regardless
self.eval_sampler = None
else:
self.eval_sampler = None
self.data = DataLoader(self.testing_samples, 1,
shuffle=kwargs.get("shuffle_test", False),
num_workers=1,
pin_memory=False,
sampler=self.eval_sampler)
elif mode == 'test':
self.testing_samples = DataLoadPreprocess(
config, mode, transform=transform)
self.data = DataLoader(self.testing_samples,
1, shuffle=False, num_workers=1)
else:
print(
'mode should be one of \'train, test, online_eval\'. Got {}'.format(mode)) | Data loader for depth datasets
Args:
config (dict): Config dictionary. Refer to utils/config.py
mode (str): "train" or "online_eval"
device (str, optional): Device to load the data on. Defaults to 'cpu'.
transform (torchvision.transforms, optional): Transform to apply to the data. Defaults to None. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/data_mono.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/data_mono.py | Apache-2.0 |
def repetitive_roundrobin(*iterables):
"""
cycles through iterables but sample wise
first yield first sample from first iterable then first sample from second iterable and so on
then second sample from first iterable then second sample from second iterable and so on
If one iterable is shorter than the others, it is repeated until all iterables are exhausted
repetitive_roundrobin('ABC', 'D', 'EF') --> A D E B D F C D E
"""
# Repetitive roundrobin
iterables_ = [iter(it) for it in iterables]
exhausted = [False] * len(iterables)
while not all(exhausted):
for i, it in enumerate(iterables_):
try:
yield next(it)
except StopIteration:
exhausted[i] = True
iterables_[i] = itertools.cycle(iterables[i])
# First elements may get repeated if one iterable is shorter than the others
yield next(iterables_[i]) | cycles through iterables but sample wise
first yield first sample from first iterable then first sample from second iterable and so on
then second sample from first iterable then second sample from second iterable and so on
If one iterable is shorter than the others, it is repeated until all iterables are exhausted
repetitive_roundrobin('ABC', 'D', 'EF') --> A D E B D F C D E | repetitive_roundrobin | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/data_mono.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/data_mono.py | Apache-2.0 |
def __init__(self, probability=0.5):
"""Init.
Args:
probability (float, optional): Flip probability. Defaults to 0.5.
"""
self.__probability = probability | Init.
Args:
probability (float, optional): Flip probability. Defaults to 0.5. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/transforms.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/transforms.py | Apache-2.0 |
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
"""Rezise the sample to ensure the given size. Keeps aspect ratio.
Args:
sample (dict): sample
size (tuple): image size
Returns:
tuple: new size
"""
shape = list(sample["disparity"].shape)
if shape[0] >= size[0] and shape[1] >= size[1]:
return sample
scale = [0, 0]
scale[0] = size[0] / shape[0]
scale[1] = size[1] / shape[1]
scale = max(scale)
shape[0] = math.ceil(scale * shape[0])
shape[1] = math.ceil(scale * shape[1])
# resize
sample["image"] = cv2.resize(
sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
)
sample["disparity"] = cv2.resize(
sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
)
sample["mask"] = cv2.resize(
sample["mask"].astype(np.float32),
tuple(shape[::-1]),
interpolation=cv2.INTER_NEAREST,
)
sample["mask"] = sample["mask"].astype(bool)
return tuple(shape) | Resize the sample to ensure the given size. Keeps aspect ratio.
Args:
sample (dict): sample
size (tuple): image size
Returns:
tuple: new size | apply_min_size | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/transforms.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/transforms.py | Apache-2.0 |
def __init__(
self,
width,
height,
resize_if_needed=False,
image_interpolation_method=cv2.INTER_AREA,
):
"""Init.
Args:
width (int): output width
height (int): output height
resize_if_needed (bool, optional): If True, sample might be upsampled to ensure
that a crop of size (width, height) is possible. Defaults to False.
"""
self.__size = (height, width)
self.__resize_if_needed = resize_if_needed
self.__image_interpolation_method = image_interpolation_method | Init.
Args:
width (int): output width
height (int): output height
resize_if_needed (bool, optional): If True, sample might be upsampled to ensure
that a crop of size (width, height) is possible. Defaults to False.
def __init__(
self,
width,
height,
resize_target=True,
keep_aspect_ratio=False,
ensure_multiple_of=1,
resize_method="lower_bound",
image_interpolation_method=cv2.INTER_AREA,
letter_box=False,
):
"""Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height are constrained to be a multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound".
"""
self.__width = width
self.__height = height
self.__resize_target = resize_target
self.__keep_aspect_ratio = keep_aspect_ratio
self.__multiple_of = ensure_multiple_of
self.__resize_method = resize_method
self.__image_interpolation_method = image_interpolation_method
self.__letter_box = letter_box | Init.
Args:
width (int): desired output width
height (int): desired output height
resize_target (bool, optional):
True: Resize the full sample (image, mask, target).
False: Resize image only.
Defaults to True.
keep_aspect_ratio (bool, optional):
True: Keep the aspect ratio of the input sample.
Output sample might not have the given width and height, and
resize behaviour depends on the parameter 'resize_method'.
Defaults to False.
ensure_multiple_of (int, optional):
Output width and height are constrained to be a multiple of this parameter.
Defaults to 1.
resize_method (str, optional):
"lower_bound": Output will be at least as large as the given size.
"upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
"minimal": Scale as least as possible. (Output size might be smaller than given size.)
Defaults to "lower_bound". | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/transforms.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/transforms.py | Apache-2.0 |
def __init__(self, max_val=1.0, use_mask=True):
"""Init.
Args:
max_val (float, optional): Max output value. Defaults to 1.0.
use_mask (bool, optional): Only operate on valid pixels (mask == True). Defaults to True.
"""
self.__max_val = max_val
self.__use_mask = use_mask | Init.
Args:
max_val (float, optional): Max output value. Defaults to 1.0.
use_mask (bool, optional): Only operate on valid pixels (mask == True). Defaults to True. | __init__ | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/transforms.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/transforms.py | Apache-2.0 |
def get_white_border(rgb_image, value=255, **kwargs) -> CropParams:
"""Crops the white border of the RGB.
Args:
rgb: RGB image, shape (H, W, 3).
Returns:
Crop parameters.
"""
if value == 255:
# assert range of values in rgb image is [0, 255]
assert np.max(rgb_image) <= 255 and np.min(rgb_image) >= 0, "RGB image values are not in range [0, 255]."
assert rgb_image.max() > 1, "RGB image values are not in range [0, 255]."
elif value == 1:
# assert range of values in rgb image is [0, 1]
assert np.max(rgb_image) <= 1 and np.min(rgb_image) >= 0, "RGB image values are not in range [0, 1]."
return get_border_params(rgb_image, value=value, **kwargs) | Crops the white border of the RGB.
Args:
rgb: RGB image, shape (H, W, 3).
Returns:
Crop parameters. | get_white_border | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/preprocess.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/preprocess.py | Apache-2.0 |
def get_black_border(rgb_image, **kwargs) -> CropParams:
"""Crops the black border of the RGB.
Args:
rgb: RGB image, shape (H, W, 3).
Returns:
Crop parameters.
"""
return get_border_params(rgb_image, value=0, **kwargs) | Crops the black border of the RGB.
Args:
rgb: RGB image, shape (H, W, 3).
Returns:
Crop parameters. | get_black_border | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/preprocess.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/preprocess.py | Apache-2.0 |
def crop_image(image: np.ndarray, crop_params: CropParams) -> np.ndarray:
"""Crops the image according to the crop parameters.
Args:
image: RGB or depth image, shape (H, W, 3) or (H, W).
crop_params: Crop parameters.
Returns:
Cropped image.
"""
return image[crop_params.top:crop_params.bottom, crop_params.left:crop_params.right] | Crops the image according to the crop parameters.
Args:
image: RGB or depth image, shape (H, W, 3) or (H, W).
crop_params: Crop parameters.
Returns:
Cropped image. | crop_image | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/preprocess.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/preprocess.py | Apache-2.0 |
def crop_images(*images: np.ndarray, crop_params: CropParams) -> Tuple[np.ndarray]:
"""Crops the images according to the crop parameters.
Args:
images: RGB or depth images, shape (H, W, 3) or (H, W).
crop_params: Crop parameters.
Returns:
Cropped images.
"""
return tuple(crop_image(image, crop_params) for image in images) | Crops the images according to the crop parameters.
Args:
images: RGB or depth images, shape (H, W, 3) or (H, W).
crop_params: Crop parameters.
Returns:
Cropped images. | crop_images | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/preprocess.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/preprocess.py | Apache-2.0 |
def crop_black_or_white_border(rgb_image, *other_images: np.ndarray, tolerance=0.1, cut_off=20, level_diff_threshold=5) -> Tuple[np.ndarray]:
"""Crops the white and black border of the RGB and depth images.
Args:
rgb: RGB image, shape (H, W, 3). This image is used to determine the border.
other_images: The other images to crop according to the border of the RGB image.
Returns:
Cropped RGB and other images.
"""
# crop black border
crop_params = get_black_border(rgb_image, tolerance=tolerance, cut_off=cut_off, level_diff_threshold=level_diff_threshold)
cropped_images = crop_images(rgb_image, *other_images, crop_params=crop_params)
# crop white border
crop_params = get_white_border(cropped_images[0], tolerance=tolerance, cut_off=cut_off, level_diff_threshold=level_diff_threshold)
cropped_images = crop_images(*cropped_images, crop_params=crop_params)
return cropped_images | Crops the white and black border of the RGB and depth images.
Args:
rgb: RGB image, shape (H, W, 3). This image is used to determine the border.
other_images: The other images to crop according to the border of the RGB image.
Returns:
Cropped RGB and other images. | crop_black_or_white_border | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/data/preprocess.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/data/preprocess.py | Apache-2.0 |
def split_combined_args(kwargs):
"""Splits the arguments that are combined with '__' into multiple arguments.
Combined arguments should have equal number of keys and values.
Keys are separated by '__' and Values are separated with ';'.
For example, '__n_bins__lr=256;0.001'
Args:
kwargs (dict): key-value pairs of arguments where key-value is optionally combined according to the above format.
Returns:
dict: Parsed dict with the combined arguments split into individual key-value pairs.
"""
new_kwargs = dict(kwargs)
for key, value in kwargs.items():
if key.startswith("__"):
keys = key.split("__")[1:]
values = value.split(";")
assert len(keys) == len(
values), f"Combined arguments should have equal number of keys and values. Keys are separated by '__' and Values are separated with ';'. For example, '__n_bins__lr=256;0.001. Given (keys,values) is ({keys}, {values})"
for k, v in zip(keys, values):
new_kwargs[k] = v
return new_kwargs | Splits the arguments that are combined with '__' into multiple arguments.
Combined arguments should have equal number of keys and values.
Keys are separated by '__' and Values are separated with ';'.
For example, '__n_bins__lr=256;0.001'
Args:
kwargs (dict): key-value pairs of arguments where key-value is optionally combined according to the above format.
Returns:
dict: Parsed dict with the combined arguments split into individual key-value pairs. | split_combined_args | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/utils/config.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/utils/config.py | Apache-2.0 |
def parse_list(config, key, dtype=int):
"""Parse a list of values for the key if the value is a string. The values are separated by a comma.
Modifies the config in place.
"""
if key in config:
if isinstance(config[key], str):
config[key] = list(map(dtype, config[key].split(',')))
assert isinstance(config[key], list) and all([isinstance(e, dtype) for e in config[key]]
), f"{key} should be a list of values dtype {dtype}. Given {config[key]} of type {type(config[key])} with values of type {[type(e) for e in config[key]]}." | Parse a list of values for the key if the value is a string. The values are separated by a comma.
Modifies the config in place. | parse_list | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/utils/config.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/utils/config.py | Apache-2.0 |
def get_model_config(model_name, model_version=None):
"""Find and parse the .json config file for the model.
Args:
model_name (str): name of the model. The config file should be named config_{model_name}[_{model_version}].json under the models/{model_name} directory.
model_version (str, optional): Specific config version. If specified config_{model_name}_{model_version}.json is searched for and used. Otherwise config_{model_name}.json is used. Defaults to None.
Returns:
easydict: the config dictionary for the model.
"""
config_fname = f"config_{model_name}_{model_version}.json" if model_version is not None else f"config_{model_name}.json"
config_file = os.path.join(ROOT, "models", model_name, config_fname)
if not os.path.exists(config_file):
return None
with open(config_file, "r") as f:
config = edict(json.load(f))
# handle dictionary inheritance
# only training config is supported for inheritance
if "inherit" in config.train and config.train.inherit is not None:
inherit_config = get_model_config(config.train["inherit"]).train
for key, value in inherit_config.items():
if key not in config.train:
config.train[key] = value
return edict(config) | Find and parse the .json config file for the model.
Args:
model_name (str): name of the model. The config file should be named config_{model_name}[_{model_version}].json under the models/{model_name} directory.
model_version (str, optional): Specific config version. If specified config_{model_name}_{model_version}.json is searched for and used. Otherwise config_{model_name}.json is used. Defaults to None.
Returns:
easydict: the config dictionary for the model. | get_model_config | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/utils/config.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/utils/config.py | Apache-2.0 |
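The expected config layout and the inheritance behaviour can be illustrated as follows; the file names below are hypothetical and only mirror the naming convention described in the docstring.
# models/zoedepth/config_zoedepth.json          -> get_model_config("zoedepth")
# models/zoedepth/config_zoedepth_kitti.json    -> get_model_config("zoedepth", "kitti")
# A "train" section containing {"inherit": "zoedepth", ...} pulls in any keys it
# does not define itself from the parent model's train config.
from zoedepth.utils.config import get_model_config

cfg = get_model_config("zoedepth")
if cfg is not None:                # None is returned when no config file exists
    print(sorted(cfg.train.keys()))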
def get_config(model_name, mode='train', dataset=None, **overwrite_kwargs):
"""Main entry point to get the config for the model.
Args:
model_name (str): name of the desired model.
mode (str, optional): "train" or "infer". Defaults to 'train'.
dataset (str, optional): If specified, the corresponding dataset configuration is loaded as well. Defaults to None.
Keyword Args: key-value pairs of arguments to overwrite the default config.
The order of precedence for overwriting the config is (Higher precedence first):
# 1. overwrite_kwargs
# 2. "config_version": Config file version if specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{config_version}.json
# 3. "version_name": Default Model version specific config specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{version_name}.json
# 4. common_config: Default config for all models specified in COMMON_CONFIG
Returns:
easydict: The config dictionary for the model.
"""
check_choices("Model", model_name, ["zoedepth", "zoedepth_nk"])
check_choices("Mode", mode, ["train", "infer", "eval"])
if mode == "train":
check_choices("Dataset", dataset, ["nyu", "kitti", "mix", None])
config = flatten({**COMMON_CONFIG, **COMMON_TRAINING_CONFIG})
config = update_model_config(config, mode, model_name)
# update with model version specific config
version_name = overwrite_kwargs.get("version_name", config["version_name"])
config = update_model_config(config, mode, model_name, version_name)
# update with config version if specified
config_version = overwrite_kwargs.get("config_version", None)
if config_version is not None:
print("Overwriting config with config_version", config_version)
config = update_model_config(config, mode, model_name, config_version)
# update with overwrite_kwargs
# Combined args are useful for hyperparameter search
overwrite_kwargs = split_combined_args(overwrite_kwargs)
config = {**config, **overwrite_kwargs}
# Casting to bool # TODO: Not necessary. Remove and test
for key in KEYS_TYPE_BOOL:
if key in config:
config[key] = bool(config[key])
# Model specific post processing of config
parse_list(config, "n_attractors")
# adjust n_bins for each bin configuration if bin_conf is given and n_bins is passed in overwrite_kwargs
if 'bin_conf' in config and 'n_bins' in overwrite_kwargs:
bin_conf = config['bin_conf'] # list of dicts
n_bins = overwrite_kwargs['n_bins']
new_bin_conf = []
for conf in bin_conf:
conf['n_bins'] = n_bins
new_bin_conf.append(conf)
config['bin_conf'] = new_bin_conf
if mode == "train":
orig_dataset = dataset
if dataset == "mix":
dataset = 'nyu' # Use nyu as default for mix. Dataset config is changed accordingly while loading the dataloader
if dataset is not None:
config['project'] = f"MonoDepth3-{orig_dataset}" # Set project for wandb
if dataset is not None:
config['dataset'] = dataset
config = {**DATASETS_CONFIG[dataset], **config}
config['model'] = model_name
typed_config = {k: infer_type(v) for k, v in config.items()}
# add hostname to config
config['hostname'] = platform.node()
return edict(typed_config) | Main entry point to get the config for the model.
Args:
model_name (str): name of the desired model.
mode (str, optional): "train" or "infer". Defaults to 'train'.
dataset (str, optional): If specified, the corresponding dataset configuration is loaded as well. Defaults to None.
Keyword Args: key-value pairs of arguments to overwrite the default config.
The order of precedence for overwriting the config is (Higher precedence first):
# 1. overwrite_kwargs
# 2. "config_version": Config file version if specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{config_version}.json
# 3. "version_name": Default Model version specific config specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{version_name}.json
# 4. common_config: Default config for all models specified in COMMON_CONFIG
Returns:
easydict: The config dictionary for the model. | get_config | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/utils/config.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/utils/config.py | Apache-2.0 |
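A hedged usage sketch of the precedence rules; it assumes the repository's bundled config files are present and that zoedepth.utils.config is importable.
from zoedepth.utils.config import get_config

# overwrite_kwargs win over the file defaults, and '__' combined arguments
# are split before being applied (see split_combined_args above).
config = get_config("zoedepth", mode="train", dataset="nyu", __n_bins__lr="64;0.000161")
print(config.model, config.dataset, config.n_bins, config.lr)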
def get_intrinsics(H,W):
"""
Intrinsics for a pinhole camera model.
Assume fov of 55 degrees and central principal point.
"""
f = 0.5 * W / np.tan(0.5 * 55 * np.pi / 180.0)
cx = 0.5 * W
cy = 0.5 * H
return np.array([[f, 0, cx],
[0, f, cy],
[0, 0, 1]]) | Intrinsics for a pinhole camera model.
Assume fov of 55 degrees and central principal point. | get_intrinsics | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/utils/geometry.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/utils/geometry.py | Apache-2.0 |
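A small numeric check of the pinhole model above; only numpy and the geometry module listed for this entry are assumed.
import numpy as np
from zoedepth.utils.geometry import get_intrinsics

H, W = 480, 640
K = get_intrinsics(H, W)
assert np.isclose(K[0, 0], 0.5 * W / np.tan(np.deg2rad(27.5)))   # f ≈ 614.7 px for a 55-degree FOV
ray = np.linalg.inv(K) @ np.array([W / 2, H / 2, 1.0])           # back-project the principal point
print(K[0, 0], ray)                                              # ray ≈ [0, 0, 1], i.e. the optical axis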
def create_triangles(h, w, mask=None):
"""
Reference: https://github.com/google-research/google-research/blob/e96197de06613f1b027d20328e06d69829fa5a89/infinite_nature/render_utils.py#L68
Creates mesh triangle indices from a given pixel grid size.
This function is not and need not be differentiable as triangle indices are
fixed.
Args:
h: (int) denoting the height of the image.
w: (int) denoting the width of the image.
Returns:
triangles: 2D numpy array of indices (int) with shape (2(W-1)(H-1) x 3)
"""
x, y = np.meshgrid(range(w - 1), range(h - 1))
tl = y * w + x
tr = y * w + x + 1
bl = (y + 1) * w + x
br = (y + 1) * w + x + 1
triangles = np.array([tl, bl, tr, br, tr, bl])
triangles = np.transpose(triangles, (1, 2, 0)).reshape(
((w - 1) * (h - 1) * 2, 3))
if mask is not None:
mask = mask.reshape(-1)
triangles = triangles[mask[triangles].all(1)]
return triangles | Reference: https://github.com/google-research/google-research/blob/e96197de06613f1b027d20328e06d69829fa5a89/infinite_nature/render_utils.py#L68
Creates mesh triangle indices from a given pixel grid size.
This function is not and need not be differentiable as triangle indices are
fixed.
Args:
h: (int) denoting the height of the image.
w: (int) denoting the width of the image.
Returns:
triangles: 2D numpy array of indices (int) with shape (2(W-1)(H-1) x 3) | create_triangles | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/utils/geometry.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/utils/geometry.py | Apache-2.0 |
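A quick sanity check of the triangle count and of the optional mask, assuming the same geometry module import.
import numpy as np
from zoedepth.utils.geometry import create_triangles

h, w = 3, 4
tri = create_triangles(h, w)
assert tri.shape == (2 * (w - 1) * (h - 1), 3)      # 12 triangles for a 3x4 pixel grid
mask = np.ones((h, w), dtype=bool)
mask[0, 0] = False                                   # invalidate pixel (0, 0)
assert len(create_triangles(h, w, mask=mask)) < len(tri)   # triangles touching it are dropped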
def denormalize(x):
"""Reverses the imagenet normalization applied to the input.
Args:
x (torch.Tensor - shape(N,3,H,W)): input tensor
Returns:
torch.Tensor - shape(N,3,H,W): Denormalized input
"""
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device)
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device)
return x * std + mean | Reverses the imagenet normalization applied to the input.
Args:
x (torch.Tensor - shape(N,3,H,W)): input tensor
Returns:
torch.Tensor - shape(N,3,H,W): Denormalized input | denormalize | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/utils/misc.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/utils/misc.py | Apache-2.0 |
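A round-trip check, assuming torchvision is available: normalizing with the standard ImageNet statistics and then calling denormalize recovers the input.
import torch
from torchvision import transforms
from zoedepth.utils.misc import denormalize

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
x = torch.rand(2, 3, 64, 64)                         # fake batch of images in [0, 1]
assert torch.allclose(denormalize(normalize(x)), x, atol=1e-6)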
def colorize(value, vmin=None, vmax=None, cmap='gray_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
"""Converts a depth map to a color image.
Args:
value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, value.min() is used. Defaults to None.
vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, value.max() is used. Defaults to None.
cmap (str, optional): matplotlib colormap to use. Defaults to 'gray_r'.
invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
background_color (tuple[int], optional): 4-tuple RGB color to give to invalid pixels. Defaults to (128, 128, 128, 255).
gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.
Returns:
numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
"""
if isinstance(value, torch.Tensor):
value = value.detach().cpu().numpy()
value = value.squeeze()
if invalid_mask is None:
invalid_mask = value == invalid_val
mask = np.logical_not(invalid_mask)
# normalize
vmin = np.percentile(value[mask],2) if vmin is None else vmin
vmax = np.percentile(value[mask],85) if vmax is None else vmax
if vmin != vmax:
value = (value - vmin) / (vmax - vmin) # vmin..vmax
else:
# Avoid 0-division
value = value * 0.
# squeeze last dim if it exists
# grey out the invalid values
value[invalid_mask] = np.nan
cmapper = matplotlib.cm.get_cmap(cmap)
if value_transform:
value = value_transform(value)
# value = value / value.max()
value = cmapper(value, bytes=True) # (nxmx4)
# img = value[:, :, :]
img = value[...]
img[invalid_mask] = background_color
# return img.transpose((2, 0, 1))
if gamma_corrected:
# gamma correction
img = img / 255
img = np.power(img, 2.2)
img = img * 255
img = img.astype(np.uint8)
return img | Converts a depth map to a color image.
Args:
value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, value.min() is used. Defaults to None.
vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, value.max() is used. Defaults to None.
cmap (str, optional): matplotlib colormap to use. Defaults to 'gray_r'.
invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
background_color (tuple[int], optional): 4-tuple RGB color to give to invalid pixels. Defaults to (128, 128, 128, 255).
gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.
Returns:
numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4) | colorize | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/utils/misc.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/utils/misc.py | Apache-2.0 |
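A hedged usage sketch: colorizing a fake depth map and saving it with PIL; the value range and file name are arbitrary choices for the example.
import torch
from PIL import Image
from zoedepth.utils.misc import colorize

depth = torch.rand(1, 1, 240, 320) * 10.0            # fake metric depth in meters
colored = colorize(depth, vmin=0.0, vmax=10.0, cmap="magma_r")
print(colored.shape, colored.dtype)                  # (240, 320, 4) uint8 RGBA
Image.fromarray(colored).save("depth_vis.png")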
def compute_errors(gt, pred):
"""Compute metrics for 'pred' compared to 'gt'
Args:
gt (numpy.ndarray): Ground truth values
pred (numpy.ndarray): Predicted values
gt.shape should be equal to pred.shape
Returns:
dict: Dictionary containing the following metrics:
'a1': Delta1 accuracy: Fraction of pixels that are within a scale factor of 1.25
'a2': Delta2 accuracy: Fraction of pixels that are within a scale factor of 1.25^2
'a3': Delta3 accuracy: Fraction of pixels that are within a scale factor of 1.25^3
'abs_rel': Absolute relative error
'rmse': Root mean squared error
'log_10': Absolute log10 error
'sq_rel': Squared relative error
'rmse_log': Root mean squared error on the log scale
'silog': Scale invariant log error
"""
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean()
return dict(a1=a1, a2=a2, a3=a3, abs_rel=abs_rel, rmse=rmse, log_10=log_10, rmse_log=rmse_log,
silog=silog, sq_rel=sq_rel) | Compute metrics for 'pred' compared to 'gt'
Args:
gt (numpy.ndarray): Ground truth values
pred (numpy.ndarray): Predicted values
gt.shape should be equal to pred.shape
Returns:
dict: Dictionary containing the following metrics:
'a1': Delta1 accuracy: Fraction of pixels that are within a scale factor of 1.25
'a2': Delta2 accuracy: Fraction of pixels that are within a scale factor of 1.25^2
'a3': Delta3 accuracy: Fraction of pixels that are within a scale factor of 1.25^3
'abs_rel': Absolute relative error
'rmse': Root mean squared error
'log_10': Absolute log10 error
'sq_rel': Squared relative error
'rmse_log': Root mean squared error on the log scale
'silog': Scale invariant log error | compute_errors | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/utils/misc.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/utils/misc.py | Apache-2.0 |
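A tiny worked example on hand-picked values; every ratio max(gt/pred, pred/gt) stays below 1.25, so the delta1 accuracy comes out as exactly 1.0.
import numpy as np
from zoedepth.utils.misc import compute_errors

gt = np.array([1.0, 2.0, 4.0, 8.0])
pred = np.array([1.1, 1.9, 4.5, 7.0])
metrics = compute_errors(gt, pred)
print({k: round(float(v), 4) for k, v in metrics.items()})   # metrics["a1"] == 1.0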
def compute_metrics(gt, pred, interpolate=True, garg_crop=False, eigen_crop=True, dataset='nyu', min_depth_eval=0.1, max_depth_eval=10, **kwargs):
"""Compute metrics of predicted depth maps. Applies cropping and masking as necessary or specified via arguments. Refer to compute_errors for more details on metrics.
"""
if 'config' in kwargs:
config = kwargs['config']
garg_crop = config.garg_crop
eigen_crop = config.eigen_crop
min_depth_eval = config.min_depth_eval
max_depth_eval = config.max_depth_eval
if gt.shape[-2:] != pred.shape[-2:] and interpolate:
pred = nn.functional.interpolate(
pred, gt.shape[-2:], mode='bilinear', align_corners=True)
pred = pred.squeeze().cpu().numpy()
pred[pred < min_depth_eval] = min_depth_eval
pred[pred > max_depth_eval] = max_depth_eval
pred[np.isinf(pred)] = max_depth_eval
pred[np.isnan(pred)] = min_depth_eval
gt_depth = gt.squeeze().cpu().numpy()
valid_mask = np.logical_and(
gt_depth > min_depth_eval, gt_depth < max_depth_eval)
if garg_crop or eigen_crop:
gt_height, gt_width = gt_depth.shape
eval_mask = np.zeros(valid_mask.shape)
if garg_crop:
eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height),
int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
elif eigen_crop:
# print("-"*10, " EIGEN CROP ", "-"*10)
if dataset == 'kitti':
eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height),
int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1
else:
# assert gt_depth.shape == (480, 640), "Error: Eigen crop is currently only valid for (480, 640) images"
eval_mask[45:471, 41:601] = 1
else:
eval_mask = np.ones(valid_mask.shape)
valid_mask = np.logical_and(valid_mask, eval_mask)
return compute_errors(gt_depth[valid_mask], pred[valid_mask]) | Compute metrics of predicted depth maps. Applies cropping and masking as necessary or specified via arguments. Refer to compute_errors for more details on metrics. | compute_metrics | python | LiheYoung/Depth-Anything | metric_depth/zoedepth/utils/misc.py | https://github.com/LiheYoung/Depth-Anything/blob/master/metric_depth/zoedepth/utils/misc.py | Apache-2.0 |
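A hedged end-to-end sketch with synthetic tensors: the prediction is produced at half resolution and compute_metrics interpolates it back to the ground-truth size before applying the NYU eigen crop.
import torch
from zoedepth.utils.misc import compute_metrics

gt = torch.rand(1, 1, 480, 640) * 9.0 + 0.5          # fake ground truth in (0.5, 9.5) m
pred = torch.nn.functional.interpolate(gt, (240, 320), mode="bilinear", align_corners=True)
pred = pred + 0.05 * torch.randn_like(pred)          # add noise to mimic a model prediction
metrics = compute_metrics(gt, pred, dataset="nyu", min_depth_eval=1e-3, max_depth_eval=10)
print(metrics["abs_rel"], metrics["rmse"])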
def dinov2_vits14(*, pretrained: bool = True, weights: Union[Weights, str] = Weights.LVD142M, **kwargs):
"""
DINOv2 ViT-S/14 model (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(arch_name="vit_small", pretrained=pretrained, weights=weights, **kwargs) | DINOv2 ViT-S/14 model (optionally) pretrained on the LVD-142M dataset. | dinov2_vits14 | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/hubconf.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/hubconf.py | Apache-2.0 |
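These entry points are meant to be consumed through torch.hub; a hedged example against the upstream facebookresearch/dinov2 hub repo (network access and a matching hubconf are assumed):
import torch

model = torch.hub.load("facebookresearch/dinov2", "dinov2_vits14")
model.eval()
x = torch.randn(1, 3, 224, 224)                      # H and W must be multiples of the 14 px patch size
with torch.no_grad():
    cls_embedding = model(x)                         # (1, 384) global image embedding for ViT-S/14
print(cls_embedding.shape)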
def dinov2_vitb14(*, pretrained: bool = True, weights: Union[Weights, str] = Weights.LVD142M, **kwargs):
"""
DINOv2 ViT-B/14 model (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(arch_name="vit_base", pretrained=pretrained, weights=weights, **kwargs) | DINOv2 ViT-B/14 model (optionally) pretrained on the LVD-142M dataset. | dinov2_vitb14 | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/hubconf.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/hubconf.py | Apache-2.0 |
def dinov2_vitl14(*, pretrained: bool = True, weights: Union[Weights, str] = Weights.LVD142M, **kwargs):
"""
DINOv2 ViT-L/14 model (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(arch_name="vit_large", pretrained=pretrained, weights=weights, **kwargs) | DINOv2 ViT-L/14 model (optionally) pretrained on the LVD-142M dataset. | dinov2_vitl14 | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/hubconf.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/hubconf.py | Apache-2.0 |
def dinov2_vitg14(*, pretrained: bool = True, weights: Union[Weights, str] = Weights.LVD142M, **kwargs):
"""
DINOv2 ViT-g/14 model (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(
arch_name="vit_giant2",
ffn_layer="swiglufused",
weights=weights,
pretrained=pretrained,
**kwargs,
) | DINOv2 ViT-g/14 model (optionally) pretrained on the LVD-142M dataset. | dinov2_vitg14 | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/hubconf.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/hubconf.py | Apache-2.0 |
def dinov2_vits14_reg(*, pretrained: bool = True, weights: Union[Weights, str] = Weights.LVD142M, **kwargs):
"""
DINOv2 ViT-S/14 model with registers (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(
arch_name="vit_small",
pretrained=pretrained,
weights=weights,
num_register_tokens=4,
interpolate_antialias=True,
interpolate_offset=0.0,
**kwargs,
) | DINOv2 ViT-S/14 model with registers (optionally) pretrained on the LVD-142M dataset. | dinov2_vits14_reg | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/hubconf.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/hubconf.py | Apache-2.0 |
def dinov2_vitb14_reg(*, pretrained: bool = True, weights: Union[Weights, str] = Weights.LVD142M, **kwargs):
"""
DINOv2 ViT-B/14 model with registers (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(
arch_name="vit_base",
pretrained=pretrained,
weights=weights,
num_register_tokens=4,
interpolate_antialias=True,
interpolate_offset=0.0,
**kwargs,
) | DINOv2 ViT-B/14 model with registers (optionally) pretrained on the LVD-142M dataset. | dinov2_vitb14_reg | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/hubconf.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/hubconf.py | Apache-2.0 |
def dinov2_vitl14_reg(*, pretrained: bool = True, weights: Union[Weights, str] = Weights.LVD142M, **kwargs):
"""
DINOv2 ViT-L/14 model with registers (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(
arch_name="vit_large",
pretrained=pretrained,
weights=weights,
num_register_tokens=4,
interpolate_antialias=True,
interpolate_offset=0.0,
**kwargs,
) | DINOv2 ViT-L/14 model with registers (optionally) pretrained on the LVD-142M dataset. | dinov2_vitl14_reg | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/hubconf.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/hubconf.py | Apache-2.0 |
def dinov2_vitg14_reg(*, pretrained: bool = True, weights: Union[Weights, str] = Weights.LVD142M, **kwargs):
"""
DINOv2 ViT-g/14 model with registers (optionally) pretrained on the LVD-142M dataset.
"""
return _make_dinov2_model(
arch_name="vit_giant2",
ffn_layer="swiglufused",
weights=weights,
pretrained=pretrained,
num_register_tokens=4,
interpolate_antialias=True,
interpolate_offset=0.0,
**kwargs,
) | DINOv2 ViT-g/14 model with registers (optionally) pretrained on the LVD-142M dataset. | dinov2_vitg14_reg | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/hubconf.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/hubconf.py | Apache-2.0 |
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
ffn_bias=True,
proj_bias=True,
drop_path_rate=0.0,
drop_path_uniform=False,
init_values=None, # for layerscale: None or 0 => no layerscale
embed_layer=PatchEmbed,
act_layer=nn.GELU,
block_fn=Block,
ffn_layer="mlp",
block_chunks=1,
num_register_tokens=0,
interpolate_antialias=False,
interpolate_offset=0.1,
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
proj_bias (bool): enable bias for proj in attn if True
ffn_bias (bool): enable bias for ffn if True
drop_path_rate (float): stochastic depth rate
drop_path_uniform (bool): apply uniform drop rate across blocks
weight_init (str): weight init scheme
init_values (float): layer-scale init values
embed_layer (nn.Module): patch embedding layer
act_layer (nn.Module): MLP activation layer
block_fn (nn.Module): transformer block class
ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
num_register_tokens: (int) number of extra cls tokens (so-called "registers")
interpolate_antialias: (bool) flag to apply anti-aliasing when interpolating positional embeddings
interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings
"""
super().__init__()
norm_layer = partial(nn.LayerNorm, eps=1e-6)
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_tokens = 1
self.n_blocks = depth
self.num_heads = num_heads
self.patch_size = patch_size
self.num_register_tokens = num_register_tokens
self.interpolate_antialias = interpolate_antialias
self.interpolate_offset = interpolate_offset
self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
assert num_register_tokens >= 0
self.register_tokens = (
nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None
)
if drop_path_uniform is True:
dpr = [drop_path_rate] * depth
else:
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
if ffn_layer == "mlp":
logger.info("using MLP layer as FFN")
ffn_layer = Mlp
elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
logger.info("using SwiGLU layer as FFN")
ffn_layer = SwiGLUFFNFused
elif ffn_layer == "identity":
logger.info("using Identity layer as FFN")
def f(*args, **kwargs):
return nn.Identity()
ffn_layer = f
else:
raise NotImplementedError
blocks_list = [
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
proj_bias=proj_bias,
ffn_bias=ffn_bias,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
ffn_layer=ffn_layer,
init_values=init_values,
)
for i in range(depth)
]
if block_chunks > 0:
self.chunked_blocks = True
chunked_blocks = []
chunksize = depth // block_chunks
for i in range(0, depth, chunksize):
# this is to keep the block index consistent if we chunk the block list
chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize])
self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
else:
self.chunked_blocks = False
self.blocks = nn.ModuleList(blocks_list)
self.norm = norm_layer(embed_dim)
self.head = nn.Identity()
self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))
self.init_weights() | Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
proj_bias (bool): enable bias for proj in attn if True
ffn_bias (bool): enable bias for ffn if True
drop_path_rate (float): stochastic depth rate
drop_path_uniform (bool): apply uniform drop rate across blocks
weight_init (str): weight init scheme
init_values (float): layer-scale init values
embed_layer (nn.Module): patch embedding layer
act_layer (nn.Module): MLP activation layer
block_fn (nn.Module): transformer block class
ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
num_register_tokens: (int) number of extra cls tokens (so-called "registers")
interpolate_antialias: (bool) flag to apply anti-aliasing when interpolating positional embeddings
interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings | __init__ | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/vision_transformer.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/vision_transformer.py | Apache-2.0 |
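A hedged instantiation sketch of the constructor above; the import path is illustrative (it depends on how this torchhub copy is packaged), and the hyperparameters correspond to a small ViT.
import torch
from vision_transformer import DinoVisionTransformer   # illustrative import path

model = DinoVisionTransformer(img_size=224, patch_size=14, embed_dim=384,
                              depth=12, num_heads=6, block_chunks=0)
x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    out = model(x)              # head is nn.Identity, so this is the normalized CLS token
print(out.shape)                # (1, 384)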
def init_weights_vit_timm(module: nn.Module, name: str = ""):
"""ViT weight initialization, original timm impl (for reproducibility)"""
if isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=0.02)
if module.bias is not None:
nn.init.zeros_(module.bias) | ViT weight initialization, original timm impl (for reproducibility) | init_weights_vit_timm | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/vision_transformer.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/vision_transformer.py | Apache-2.0 |
def vit_giant2(patch_size=16, num_register_tokens=0, **kwargs):
"""
Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64
"""
model = DinoVisionTransformer(
patch_size=patch_size,
embed_dim=1536,
depth=40,
num_heads=24,
mlp_ratio=4,
block_fn=partial(Block, attn_class=MemEffAttention),
num_register_tokens=num_register_tokens,
**kwargs,
)
return model | Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64 | vit_giant2 | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/vision_transformer.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/vision_transformer.py | Apache-2.0 |
def forward(self, features_rank):
"""
Compute the results on all values of `self.nb_knn` neighbors from the full `self.max_k`
"""
assert all(k <= self.max_k for k in self.nb_knn)
topk_sims, neighbors_labels = self.compute_neighbors(features_rank)
batch_size = neighbors_labels.shape[0]
topk_sims_transform = softmax(topk_sims / self.T, 1)
matmul = torch.mul(
one_hot(neighbors_labels, num_classes=self.num_classes),
topk_sims_transform.view(batch_size, -1, 1),
)
probas_for_k = {k: torch.sum(matmul[:, :k, :], 1) for k in self.nb_knn}
return probas_for_k | Compute the results on all values of `self.nb_knn` neighbors from the full `self.max_k` | forward | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/dinov2/eval/knn.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/dinov2/eval/knn.py | Apache-2.0 |
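The weighted-voting step above can be reproduced in isolation; this standalone sketch uses made-up similarities and labels to show how the temperature-scaled softmax turns the top-k neighbors into class probabilities.
import torch
from torch.nn.functional import one_hot, softmax

num_classes, T = 5, 0.07
topk_sims = torch.tensor([[0.9, 0.8, 0.2]])          # similarities to the 3 nearest neighbors
neighbors_labels = torch.tensor([[2, 2, 4]])         # their class labels
weights = softmax(topk_sims / T, dim=1)              # temperature-scaled vote weights
votes = one_hot(neighbors_labels, num_classes) * weights.unsqueeze(-1)
probas_k3 = votes.sum(dim=1)                         # class scores when voting with k=3 neighbors
print(probas_k3.argmax(dim=1))                       # tensor([2])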
def eval_log_regression(
*,
model,
train_dataset,
val_dataset,
finetune_dataset,
metric_type,
batch_size,
num_workers,
finetune_on_val=False,
train_dtype=torch.float64,
train_features_device=_CPU_DEVICE,
max_train_iters=DEFAULT_MAX_ITER,
):
"""
Implements the "standard" process for log regression evaluation:
The value of C is chosen by training on train_dataset and evaluating on
finetune_dataset. Then, the final model is trained on a concatenation of
train_dataset and finetune_dataset, and is evaluated on val_dataset.
If there is no finetune_dataset, the value of C is the one that yields
the best results on a random 10% subset of the train dataset
"""
start = time.time()
train_features, train_labels = extract_features(
model, train_dataset, batch_size, num_workers, gather_on_cpu=(train_features_device == _CPU_DEVICE)
)
val_features, val_labels = extract_features(
model, val_dataset, batch_size, num_workers, gather_on_cpu=(train_features_device == _CPU_DEVICE)
)
val_data_loader = torch.utils.data.DataLoader(
TensorDataset(val_features, val_labels),
batch_size=batch_size,
drop_last=False,
num_workers=0,
persistent_workers=False,
)
if finetune_dataset is None and finetune_on_val:
logger.info("Choosing hyperparameters on the val dataset")
finetune_features, finetune_labels = val_features, val_labels
elif finetune_dataset is None and not finetune_on_val:
logger.info("Choosing hyperparameters on 10% of the train dataset")
torch.manual_seed(0)
indices = torch.randperm(len(train_features), device=train_features.device)
finetune_index = indices[: len(train_features) // 10]
train_index = indices[len(train_features) // 10 :]
finetune_features, finetune_labels = train_features[finetune_index], train_labels[finetune_index]
train_features, train_labels = train_features[train_index], train_labels[train_index]
else:
logger.info("Choosing hyperparameters on the finetune dataset")
finetune_features, finetune_labels = extract_features(
model, finetune_dataset, batch_size, num_workers, gather_on_cpu=(train_features_device == _CPU_DEVICE)
)
# release the model - free GPU memory
del model
gc.collect()
torch.cuda.empty_cache()
finetune_data_loader = torch.utils.data.DataLoader(
TensorDataset(finetune_features, finetune_labels),
batch_size=batch_size,
drop_last=False,
)
if len(train_labels.shape) > 1:
num_classes = train_labels.shape[1]
else:
num_classes = train_labels.max() + 1
logger.info("Using cuML for logistic regression")
best_stats, best_C = sweep_C_values(
train_features=train_features,
train_labels=train_labels,
test_data_loader=finetune_data_loader,
metric_type=metric_type,
num_classes=num_classes,
train_dtype=train_dtype,
train_features_device=train_features_device,
max_train_iters=max_train_iters,
)
if not finetune_on_val:
logger.info("Best parameter found, concatenating features")
train_features = torch.cat((train_features, finetune_features))
train_labels = torch.cat((train_labels, finetune_labels))
logger.info("Training final model")
logreg_metric = build_metric(metric_type, num_classes=num_classes)
evals = train_and_evaluate(
C=best_C,
max_iter=max_train_iters,
train_features=train_features,
train_labels=train_labels,
logreg_metric=logreg_metric.clone(),
test_data_loader=val_data_loader,
eval_device=torch.cuda.current_device(),
train_dtype=train_dtype,
train_features_device=train_features_device,
)
best_stats = evals[1]["metrics"]
best_stats["best_C"] = best_C
logger.info(f"Log regression evaluation done in {int(time.time() - start)}s")
return best_stats | Implements the "standard" process for log regression evaluation:
The value of C is chosen by training on train_dataset and evaluating on
finetune_dataset. Then, the final model is trained on a concatenation of
train_dataset and finetune_dataset, and is evaluated on val_dataset.
If there is no finetune_dataset, the value of C is the one that yields
the best results on a random 10% subset of the train dataset | eval_log_regression | python | LiheYoung/Depth-Anything | torchhub/facebookresearch_dinov2_main/dinov2/eval/log_regression.py | https://github.com/LiheYoung/Depth-Anything/blob/master/torchhub/facebookresearch_dinov2_main/dinov2/eval/log_regression.py | Apache-2.0 |
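A scaled-down, hedged sketch of the same C-sweep logic, using scikit-learn in place of cuML and random features in place of extracted DINOv2 features:
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=2000, n_features=64, n_informative=32, n_classes=4, random_state=0)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
# Choose C on a held-out 10% split of the training set, then retrain on all of it.
X_fit, X_tune, y_fit, y_tune = train_test_split(X_train, y_train, test_size=0.1, random_state=0)
best_C = max((10.0 ** k for k in range(-4, 3)),
             key=lambda C: LogisticRegression(C=C, max_iter=1000).fit(X_fit, y_fit).score(X_tune, y_tune))
final = LogisticRegression(C=best_C, max_iter=1000).fit(X_train, y_train)
print(best_C, final.score(X_val, y_val))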