Simple wrapper for correctly getting rank in both distributed / non-distributed settings
def get_rank() -> int:
    """
    Simple wrapper for correctly getting rank in both distributed / non-distributed
    settings
    """
    return (
        torch.distributed.get_rank()
        if torch.distributed.is_available() and torch.distributed.is_initialized()
        else 0
    )
Broadcast an object from a source to all workers.

Args:
    obj: Object to broadcast, must be serializable
    src: Source rank for broadcast (default is primary)
    use_disk: If enabled, removes redundant CPU memory copies by writing to disk
def broadcast_object(obj: Any, src: int = _PRIMARY_RANK, use_disk: bool = True) -> Any:
    """Broadcast an object from a source to all workers.

    Args:
        obj: Object to broadcast, must be serializable
        src: Source rank for broadcast (default is primary)
        use_disk: If enabled, removes redundant CPU memory copies by writing to disk
    """
    # Either broadcast from primary to the fleet (default),
    # or use the src setting as the original rank
    if get_rank() == src:
        # Emit data
        buffer = io.BytesIO()
        torch.save(obj, buffer)
        data_view = buffer.getbuffer()
        length_tensor = torch.LongTensor([len(data_view)])
        length_tensor = broadcast(length_tensor, src=src)
        data_tensor = torch.ByteTensor(data_view)
        data_tensor = broadcast(data_tensor, src=src)
    else:
        # Fetch from the source
        length_tensor = torch.LongTensor([0])
        length_tensor = broadcast(length_tensor, src=src)
        data_tensor = torch.empty([length_tensor.item()], dtype=torch.uint8)
        data_tensor = broadcast(data_tensor, src=src)

        if use_disk:
            with tempfile.TemporaryFile("r+b") as f:
                f.write(data_tensor.numpy())
                # remove reference to the data tensor and hope that Python garbage
                # collects it
                del data_tensor
                f.seek(0)
                obj = torch.load(f)
        else:
            buffer = io.BytesIO(data_tensor.numpy())
            obj = torch.load(buffer)
    return obj
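As a usage sketch, assuming torch.distributed has already been initialized with one process per GPU (the config dict is hypothetical): every rank must make the same collective call, and only the source rank's obj argument is used.

import torch.distributed as dist

# only rank 0 holds the real object; other ranks pass a placeholder
config = {"lr": 0.1, "epochs": 90} if dist.get_rank() == 0 else None

# collective call: every rank participates, all ranks get an identical copy
config = broadcast_object(config, src=0)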
Adds generic command-line arguments for convnet training / testing to parser.
def add_generic_args(parser):
    """
    Adds generic command-line arguments for convnet training / testing to parser.
    """
    parser.add_argument(
        "--config_file", type=str, help="path to config file for model", required=True
    )
    parser.add_argument(
        "--checkpoint_folder",
        default="",
        type=str,
        help="""folder to use for saving checkpoints:
            epochal checkpoints are stored as model_<epoch>.torch,
            latest epoch checkpoint is at checkpoint.torch""",
    )
    parser.add_argument(
        "--checkpoint_load_path",
        default="",
        type=str,
        help="""path to load a checkpoint from, which can be a file or a directory:
            If the path is a directory, the checkpoint file is assumed to be
            checkpoint.torch""",
    )
    parser.add_argument(
        "--pretrained_checkpoint_path",
        default="",
        type=str,
        help="""path to load a pre-trained checkpoint from, which can be a file or a
            directory: If the path is a directory, the checkpoint file is assumed to
            be checkpoint.torch. This checkpoint is only used for fine-tuning tasks,
            and training will not resume from this checkpoint.""",
    )
    parser.add_argument(
        "--checkpoint_period",
        default=1,
        type=int,
        help="""Checkpoint every x phases (default 1)""",
    )
    parser.add_argument(
        "--show_progress",
        default=False,
        action="store_true",
        help="shows progress bar during training / testing",
    )
    parser.add_argument(
        "--skip_tensorboard",
        default=False,
        action="store_true",
        help="do not perform tensorboard visualization",
    )
    parser.add_argument(
        "--visdom_server",
        default="",
        type=str,
        help="visdom server to use (default None)",
    )
    parser.add_argument(
        "--visdom_port",
        default=8097,
        type=int,
        help="port of visdom server (default = 8097)",
    )
    parser.add_argument(
        "--profiler",
        default=False,
        action="store_true",
        help="specify this argument to profile training code",
    )
    parser.add_argument(
        "--debug",
        default=False,
        action="store_true",
        help="specify this argument for debugging mode",
    )
    parser.add_argument(
        "--ignore_checkpoint_config",
        default=False,
        action="store_true",
        help="""specify this argument to ignore the compatibility of the config
            (or lack of config) attached to the checkpoint; this will allow
            mismatches between the training specified in the config and the
            actual training of the model""",
    )
    parser.add_argument(
        "--log_freq",
        default=5,
        type=int,
        help="Logging frequency for LossLrMeterLoggingHook (default 5)",
    )
    parser.add_argument(
        "--image_backend",
        default="PIL",
        type=str,
        help="torchvision image decoder backend (PIL or accimage). Default PIL",
    )
    parser.add_argument(
        "--video_backend",
        default="pyav",
        type=str,
        help="torchvision video decoder backend (pyav or video_reader). Default pyav",
    )
    parser.add_argument(
        "--distributed_backend",
        default="none",
        type=str,
        help="""Distributed backend: either 'none' (for non-distributed runs)
            or 'ddp' (for distributed runs). Default none.""",
    )
    return parser
Perform assertions on generic command-line arguments.
def check_generic_args(args):
    """
    Perform assertions on generic command-line arguments.
    """
    # check types and values:
    assert is_pos_int(args.visdom_port), "incorrect visdom port"

    # create checkpoint folder if it does not exist:
    if args.checkpoint_folder != "" and not os.path.exists(args.checkpoint_folder):
        os.makedirs(args.checkpoint_folder, exist_ok=True)
        assert os.path.exists(args.checkpoint_folder), (
            "could not create folder %s" % args.checkpoint_folder
        )

    # when in debugging mode, enter debugger upon error:
    if args.debug:
        import sys

        from classy_vision.generic.debug import debug_info

        sys.excepthook = debug_info

    # check visdom server name:
    if args.visdom_server != "":
        if args.visdom_server.startswith("https://"):
            print("WARNING: Visdom does not work over HTTPS.")
            args.visdom_server = args.visdom_server[8:]
        if not args.visdom_server.startswith("http://"):
            args.visdom_server = "http://%s" % args.visdom_server

    # return input arguments:
    return args
Return a standard command-line parser.
def get_parser():
    """
    Return a standard command-line parser.
    """
    parser = argparse.ArgumentParser(
        description="""Start a Classy Vision training job.

    This can be used for training on your local machine, using CPU or GPU, and
    for distributed training. This script also supports Tensorboard, Visdom and
    checkpointing."""
    )

    parser = add_generic_args(parser)
    return parser
Assert and parse the command-line arguments of a given (or default) parser.
def parse_train_arguments(parser=None):
    """
    Assert and parse the command-line arguments of a given (or default) parser.
    """
    # set input arguments:
    if parser is None:
        parser = get_parser()

    # parse input arguments:
    args = parser.parse_args()

    # assertions:
    args = check_generic_args(args)
    return args
Performs CPU or GPU profiling of the specified model on the specified input.
def profile(
    model: nn.Module,
    batchsize_per_replica: int = 32,
    input_shape: Tuple[int] = (3, 224, 224),
    use_nvprof: bool = False,
    input_key: Optional[Union[str, List[str]]] = None,
):
    """
    Performs CPU or GPU profiling of the specified model on the specified input.
    """
    # assertions:
    if use_nvprof:
        raise ClassyProfilerError("Profiling not supported with nvprof")
        # FIXME (mannatsingh): in case of use_nvprof, exit() is called at the end
        # and we do not return a profile.
        assert is_on_gpu(model), "can only nvprof model that lives on GPU"
        logging.info("CUDA profiling: Make sure you are running under nvprof!")

    # input for model:
    input = get_model_dummy_input(
        model,
        input_shape,
        input_key,
        batchsize=batchsize_per_replica,
        non_blocking=False,
    )
    # perform profiling in eval mode
    with eval_model(model), torch.no_grad():
        model(input)  # warm up CUDA memory allocator and profiler
        if use_nvprof:
            # nvprof profiling (TODO: Can we infer this?)
            cudart().cudaProfilerStart()
            model(input)
            cudart().cudaProfilerStop()
            exit()  # exit gracefully
        else:
            # regular profiling
            with torch.autograd.profiler.profile(use_cuda=True) as profiler:
                model(input)
                return profiler
Some layers may take or produce a tuple/list/dict/list[dict] as the input or output of their forward function. We recursively query the tensor shapes.
def get_shape(x: Union[Tuple, List, Dict]) -> Union[Tuple, List, Dict]:
    """
    Some layers may take or produce a tuple/list/dict/list[dict] as the input or
    output of their forward function. We recursively query the tensor shapes.
    """
    if isinstance(x, (list, tuple)):
        assert len(x) > 0, "x of tuple/list type must have at least one element"
        return [get_shape(xi) for xi in x]
    elif isinstance(x, dict):
        return {k: get_shape(v) for k, v in x.items()}
    else:
        assert isinstance(x, torch.Tensor), "x is expected to be a torch tensor"
        return x.size()
Computes the number of FLOPs required for a single layer.

For common layers, such as Conv1d, the FLOP computation is implemented in this centralized place. Other layers can define their own method to compute FLOPs with the signature below, which will be used instead:

class MyModule(nn.Module):
    def flops(self, x):
        ...
def _layer_flops(layer: nn.Module, layer_args: List[Any], y: Any) -> int:
    """
    Computes the number of FLOPs required for a single layer.

    For common layers, such as Conv1d, the FLOP computation is implemented in this
    centralized place. Other layers can define their own method to compute FLOPs
    with the signature below, which will be used instead:

    class MyModule(nn.Module):
        def flops(self, x):
            ...
    """
    x = layer_args[0]
    # get layer type:
    typestr = layer.__repr__()
    layer_type = typestr[: typestr.find("(")].strip()
    batchsize_per_replica = get_batchsize_per_replica(x)

    flops = None

    # 1D convolution:
    if layer_type in ["Conv1d"]:
        # x shape is N x C x W
        out_w = int(
            (x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0])
            / layer.stride[0]
            + 1
        )
        flops = (
            batchsize_per_replica
            * layer.in_channels
            * layer.out_channels
            * layer.kernel_size[0]
            * out_w
            / layer.groups
        )

    # 2D convolution:
    elif layer_type in ["Conv2d"]:
        out_h = int(
            (x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0])
            / layer.stride[0]
            + 1
        )
        out_w = int(
            (x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1])
            / layer.stride[1]
            + 1
        )
        flops = (
            batchsize_per_replica
            * layer.in_channels
            * layer.out_channels
            * layer.kernel_size[0]
            * layer.kernel_size[1]
            * out_h
            * out_w
            / layer.groups
        )

    # learned group convolution:
    elif layer_type in ["LearnedGroupConv"]:
        conv = layer.conv
        out_h = int(
            (x.size()[2] + 2 * conv.padding[0] - conv.kernel_size[0])
            / conv.stride[0]
            + 1
        )
        out_w = int(
            (x.size()[3] + 2 * conv.padding[1] - conv.kernel_size[1])
            / conv.stride[1]
            + 1
        )
        # note: the recursive calls need the (layer, layer_args, y) signature
        count1 = _layer_flops(layer.relu, [x], None) + _layer_flops(
            layer.norm, [x], None
        )
        count2 = (
            batchsize_per_replica
            * conv.in_channels
            * conv.out_channels
            * conv.kernel_size[0]
            * conv.kernel_size[1]
            * out_h
            * out_w
            / layer.condense_factor
        )
        flops = count1 + count2

    # non-linearities:
    elif layer_type in ["ReLU", "ReLU6", "Tanh", "Sigmoid", "Softmax", "SiLU"]:
        flops = x.numel()

    # 2D pooling layers:
    elif layer_type in ["AvgPool2d", "MaxPool2d"]:
        in_h = x.size()[2]
        in_w = x.size()[3]
        if isinstance(layer.kernel_size, int):
            layer.kernel_size = (layer.kernel_size, layer.kernel_size)
        kernel_ops = layer.kernel_size[0] * layer.kernel_size[1]
        out_h = 1 + int(
            (in_h + 2 * layer.padding - layer.kernel_size[0]) / layer.stride
        )
        out_w = 1 + int(
            (in_w + 2 * layer.padding - layer.kernel_size[1]) / layer.stride
        )
        flops = x.size()[0] * x.size()[1] * out_w * out_h * kernel_ops

    # adaptive avg pool2d
    # This is approximate and works only for downsampling without padding
    # based on aten/src/ATen/native/AdaptiveAveragePooling.cpp
    elif layer_type in ["AdaptiveAvgPool2d"]:
        in_h = x.size()[2]
        in_w = x.size()[3]
        if isinstance(layer.output_size, int):
            out_h, out_w = layer.output_size, layer.output_size
        elif len(layer.output_size) == 1:
            out_h, out_w = layer.output_size[0], layer.output_size[0]
        else:
            out_h, out_w = layer.output_size
        if out_h > in_h or out_w > in_w:
            raise ClassyProfilerNotImplementedError(layer)
        batchsize_per_replica = x.size()[0]
        num_channels = x.size()[1]
        kh = in_h - out_h + 1
        kw = in_w - out_w + 1
        kernel_ops = kh * kw
        flops = batchsize_per_replica * num_channels * out_h * out_w * kernel_ops

    # linear layer:
    elif layer_type in ["Linear"]:
        weight_ops = layer.weight.numel()
        bias_ops = layer.bias.numel() if layer.bias is not None else 0
        flops = ((x.numel() / x.size(-1)) if x.ndim > 2 else x.size(0)) * (
            weight_ops + bias_ops
        )

    # batch normalization / layer normalization:
    elif layer_type in [
        "BatchNorm1d",
        "BatchNorm2d",
        "BatchNorm3d",
        "SyncBatchNorm",
        "LayerNorm",
    ]:
        flops = 2 * x.numel()

    # 3D convolution:
    elif layer_type in ["Conv3d"]:
        out_t = int(
            (x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0])
            // layer.stride[0]
            + 1
        )
        out_h = int(
            (x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1])
            // layer.stride[1]
            + 1
        )
        out_w = int(
            (x.size()[4] + 2 * layer.padding[2] - layer.kernel_size[2])
            // layer.stride[2]
            + 1
        )
        flops = (
            batchsize_per_replica
            * layer.in_channels
            * layer.out_channels
            * layer.kernel_size[0]
            * layer.kernel_size[1]
            * layer.kernel_size[2]
            * out_t
            * out_h
            * out_w
            / layer.groups
        )

    # 3D pooling layers:
    elif layer_type in ["AvgPool3d", "MaxPool3d"]:
        in_t = x.size()[2]
        in_h = x.size()[3]
        in_w = x.size()[4]
        if isinstance(layer.kernel_size, int):
            layer.kernel_size = (
                layer.kernel_size,
                layer.kernel_size,
                layer.kernel_size,
            )
        if isinstance(layer.padding, int):
            layer.padding = (layer.padding, layer.padding, layer.padding)
        if isinstance(layer.stride, int):
            layer.stride = (layer.stride, layer.stride, layer.stride)
        kernel_ops = layer.kernel_size[0] * layer.kernel_size[1] * layer.kernel_size[2]
        out_t = 1 + int(
            (in_t + 2 * layer.padding[0] - layer.kernel_size[0]) / layer.stride[0]
        )
        out_h = 1 + int(
            (in_h + 2 * layer.padding[1] - layer.kernel_size[1]) / layer.stride[1]
        )
        out_w = 1 + int(
            (in_w + 2 * layer.padding[2] - layer.kernel_size[2]) / layer.stride[2]
        )
        flops = batchsize_per_replica * x.size()[1] * out_t * out_h * out_w * kernel_ops

    # adaptive avg pool3d
    # This is approximate and works only for downsampling without padding
    # based on aten/src/ATen/native/AdaptiveAveragePooling3d.cpp
    elif layer_type in ["AdaptiveAvgPool3d"]:
        in_t = x.size()[2]
        in_h = x.size()[3]
        in_w = x.size()[4]
        out_t = layer.output_size[0]
        out_h = layer.output_size[1]
        out_w = layer.output_size[2]
        if out_t > in_t or out_h > in_h or out_w > in_w:
            raise ClassyProfilerNotImplementedError(layer)
        batchsize_per_replica = x.size()[0]
        num_channels = x.size()[1]
        kt = in_t - out_t + 1
        kh = in_h - out_h + 1
        kw = in_w - out_w + 1
        kernel_ops = kt * kh * kw
        flops = (
            batchsize_per_replica * num_channels * out_t * out_w * out_h * kernel_ops
        )

    elif layer_type in ["Dropout", "Identity"]:
        flops = 0

    elif hasattr(layer, "flops"):
        # If the module already defines a method to compute flops with the signature
        # below, we use it to compute flops
        #
        # class MyModule(nn.Module):
        #     def flops(self, x):
        #         ...
        # or
        #
        # class MyModule(nn.Module):
        #     def flops(self, x1, x2):
        #         ...
        flops = layer.flops(*layer_args)

    if flops is None:
        raise ClassyProfilerNotImplementedError(layer)

    message = [
        f"module type: {typestr}",
        f"input size: {get_shape(x)}",
        f"output size: {get_shape(y)}",
        f"params(M): {count_params(layer) / 1e6}",
        f"flops(M): {int(flops) / 1e6}",
    ]
    logging.debug("\t".join(message))
    return int(flops)
Computes the number of activations produced by a single layer.

Activations are counted only for convolutional and linear layers. To override this behavior, a layer can define a method to compute activations with the signature below, which will be used instead:

class MyModule(nn.Module):
    def activations(self, out, *layer_args):
        ...
def _layer_activations(layer: nn.Module, layer_args: List[Any], out: Any) -> int:
    """
    Computes the number of activations produced by a single layer.

    Activations are counted only for convolutional and linear layers. To override
    this behavior, a layer can define a method to compute activations with the
    signature below, which will be used to compute the activations instead.

    class MyModule(nn.Module):
        def activations(self, out, *layer_args):
            ...
    """
    typestr = layer.__repr__()
    if hasattr(layer, "activations"):
        activations = layer.activations(out, *layer_args)
    elif isinstance(layer, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        activations = out.numel()
    else:
        return 0

    message = [f"module: {typestr}", f"activations: {activations}"]
    logging.debug("\t".join(message))
    return int(activations)
Summarizes the statistics in the specified profiler.
def summarize_profiler_info(prof: torch.autograd.profiler.profile) -> str:
    """
    Summarizes the statistics in the specified profiler.
    """
    # create sorted list of times per operator:
    op2time = {}
    for item in prof.key_averages():
        op2time[item.key] = (
            item.cpu_time_total / 1000.0,
            item.cuda_time_total / 1000.0,
        )  # to milliseconds
    op2time = sorted(op2time.items(), key=operator.itemgetter(1), reverse=True)

    # create string containing the information:
    summary = "\n%s\tCPU Time\tCUDA Time\n" % ("Key".rjust(20))
    for (key, value) in op2time:
        summary += "%s\t%2.5f ms\t%2.5f ms\n" % (key.rjust(20), value[0], value[1])
    return summary
Patch the module to compute a module's complexity metrics, such as FLOPs. Calls compute_fn and passes the results to the complexity computer.
def _patched_computation_module(
    module: nn.Module, complexity_computer: ComplexityComputer, module_name: str
):
    """
    Patch the module to compute a module's complexity metrics, such as FLOPs.

    Calls compute_fn and passes the results to the complexity computer.
    """
    ty = type(module)
    typestring = module.__repr__()

    class ComputeModule(ty):
        orig_type = ty

        def _original_forward(self, *args, **kwargs):
            return ty.forward(self, *args, **kwargs)

        def forward(self, *args, **kwargs):
            out = self._original_forward(*args, **kwargs)
            complexity_computer.compute(self, list(args), out, module_name)
            return out

        def __repr__(self):
            return typestring

    return ComputeModule
Modify the forward pass to measure a module's complexity metrics, such as FLOPs.
def modify_forward(
    model: nn.Module,
    complexity_computer: ComplexityComputer,
    prefix: str = "",
    patch_attr: str = None,
) -> nn.Module:
    """
    Modify the forward pass to measure a module's complexity metrics, such as FLOPs.
    """
    # Recursively update all the modules in the model. A module is patched if it
    # contains the patch_attr (like the flops() function for FLOPs computation) or it
    # is a leaf. We stop recursing if we patch a module since that module is supposed
    # to return the results for all its children as well.
    # Since this recursion can lead to the same module being patched through different
    # paths, we make sure we only patch un-patched modules.
    if hasattr(model, "orig_type"):
        return model
    if is_leaf(model) or (patch_attr is not None and hasattr(model, patch_attr)):
        model.__class__ = _patched_computation_module(
            model, complexity_computer, prefix
        )
    else:
        for name, child in model.named_children():
            modify_forward(
                child,
                complexity_computer,
                prefix=f"{prefix}.{name}",
                patch_attr=patch_attr,
            )
    return model
Restore original forward in model.
def restore_forward(model: nn.Module, patch_attr: str = None) -> nn.Module:
    """
    Restore original forward in model.
    """
    for module in model.modules():
        if hasattr(module, "orig_type"):
            # module has been patched; un-patch it
            module.__class__ = module.orig_type
    return model
Compute the complexity of a forward pass.

Args:
    compute_unique: If True, the complexity for a given module is only calculated
        once. Otherwise, it is counted every time the module is called.

TODO(@mannatsingh): We have some assumptions about only modules which are leaves or have patch_attr defined. This should be fixed and generalized if possible.
def compute_complexity(
    model: nn.Module,
    compute_fn: Callable,
    input_shape: Tuple[int],
    input_key: Optional[Union[str, List[str]]] = None,
    patch_attr: str = None,
    compute_unique: bool = False,
) -> int:
    """
    Compute the complexity of a forward pass.

    Args:
        compute_unique: If True, the complexity for a given module is only
            calculated once. Otherwise, it is counted every time the module is
            called.

    TODO(@mannatsingh): We have some assumptions about only modules which are
    leaves or have patch_attr defined. This should be fixed and generalized if
    possible.
    """
    # assertions, input, and upvalue in which we will perform the count:
    assert isinstance(model, nn.Module)

    if not isinstance(input_shape, abc.Sequence) and not isinstance(input_shape, dict):
        return None
    else:
        input = get_model_dummy_input(model, input_shape, input_key)

    complexity_computer = ComplexityComputer(compute_fn, compute_unique)

    # measure FLOPs:
    modify_forward(model, complexity_computer, patch_attr=patch_attr)
    try:
        # compute complexity in eval mode
        with eval_model(model), torch.no_grad():
            model.forward(input)
    finally:
        restore_forward(model, patch_attr=patch_attr)

    return complexity_computer.count
Compute the number of FLOPs needed for a forward pass.
def compute_flops(
    model: nn.Module,
    input_shape: Tuple[int] = (3, 224, 224),
    input_key: Optional[Union[str, List[str]]] = None,
) -> int:
    """
    Compute the number of FLOPs needed for a forward pass.
    """
    return compute_complexity(
        model,
        _layer_flops,
        input_shape,
        input_key,
        patch_attr="flops",
    )
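A minimal usage sketch on a toy model, assuming these profiler helpers are importable and the dummy input uses a batch size of 1; only layer types with a FLOPs rule above are used:

import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())
flops = compute_flops(model, input_shape=(3, 32, 32))
# Conv2d: 1 * 3 * 8 * 3 * 3 * 32 * 32 = 221,184; ReLU: 8 * 32 * 32 = 8,192
print(f"{flops:,} FLOPs")  # 229,376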
Compute the number of activations created in a forward pass.
def compute_activations(
    model: nn.Module,
    input_shape: Tuple[int] = (3, 224, 224),
    input_key: Optional[Union[str, List[str]]] = None,
) -> int:
    """
    Compute the number of activations created in a forward pass.
    """
    return compute_complexity(
        model,
        _layer_activations,
        input_shape,
        input_key,
        patch_attr="activations",
    )
Count the number of parameters in a model.
def count_params(model: nn.Module) -> int:
    """
    Count the number of parameters in a model.
    """
    assert isinstance(model, nn.Module)
    return sum(parameter.nelement() for parameter in model.parameters())
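For instance, a single nn.Linear(10, 5) has 10 * 5 weights plus 5 biases; a small check, written as a sketch:

import torch.nn as nn

layer = nn.Linear(10, 5)
assert count_params(layer) == 10 * 5 + 5  # 55 parameters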
Automatically imports all packages under the root directory.

For instance, if your directories look like:
    root / foo / __init__.py
    root / foo / abc.py
    root / bar.py
    root / baz / xyz.py

This function will import the package foo, but not bar or baz.
def import_all_packages_from_directory(root: str) -> None:
    """Automatically imports all packages under the root directory.

    For instance, if your directories look like:
        root / foo / __init__.py
        root / foo / abc.py
        root / bar.py
        root / baz / xyz.py

    This function will import the package foo, but not bar or baz."""

    for file in os.listdir(root):
        # Try to import each file in the directory. Our previous implementation
        # would look for directories here and see if there's a __init__.py
        # under that directory, but that turns out to be unreliable while
        # running on AWS: EFS filesystems cache metadata bits so the directory
        # and existence checks fail even when the import succeeds. We should
        # find a better workaround eventually, but this will do for now.
        try:
            file = Path(file)
            module_name = file.name
            # Dots have special meaning in Python packages -- it's a relative
            # import or a subpackage. Skip these.
            if "." not in module_name and module_name not in sys.modules:
                logging.debug(f"Automatically importing {module_name}")
                importlib.import_module(module_name)
        except ModuleNotFoundError:
            pass
Returns True if a number is a non-negative integer (note: zero is accepted).
def is_pos_int(number: int) -> bool:
    """
    Returns True if a number is a non-negative integer (note: zero is accepted).
    """
    return type(number) == int and number >= 0
Returns True if a number is a non-negative float (note: zero is accepted).
def is_pos_float(number: float) -> bool:
    """
    Returns True if a number is a non-negative float (note: zero is accepted).
    """
    return type(number) == float and number >= 0.0
Returns True if a list contains only non-negative integers
def is_pos_int_list(l: List) -> bool:
    """
    Returns True if a list contains only non-negative integers
    """
    return type(l) == list and all(is_pos_int(n) for n in l)
Returns True if a tuple contains only non-negative integers
def is_pos_int_tuple(t: Tuple) -> bool:
    """
    Returns True if a tuple contains only non-negative integers
    """
    return type(t) == tuple and all(is_pos_int(n) for n in t)
Returns True if a tensor is a long tensor.
def is_long_tensor(tensor: torch.Tensor) -> bool:
    """
    Returns True if a tensor is a long tensor.
    """
    if torch.is_tensor(tensor):
        return tensor.type().endswith("LongTensor")
    else:
        return False
Returns True if a tensor is a float tensor.
def is_float_tensor(tensor: torch.Tensor) -> bool:
    """
    Returns True if a tensor is a float tensor.
    """
    if torch.is_tensor(tensor):
        return tensor.type().endswith("FloatTensor")
    else:
        return False
Returns True if a tensor is a double tensor.
def is_double_tensor(tensor: torch.Tensor) -> bool:
    """
    Returns True if a tensor is a double tensor.
    """
    if torch.is_tensor(tensor):
        return tensor.type().endswith("DoubleTensor")
    else:
        return False
Returns True if the module is a leaf in the graph.
def is_leaf(module: nn.Module) -> bool:
    """
    Returns True if the module is a leaf in the graph.
    """
    assert isinstance(module, nn.Module), "module should be nn.Module"
    return len(list(module.children())) == 0 or hasattr(module, "_mask")
Returns True if all parameters of a model live on the GPU.
def is_on_gpu(model: torch.nn.Module) -> bool:
    """
    Returns True if all parameters of a model live on the GPU.
    """
    assert isinstance(model, torch.nn.Module)
    on_gpu = True
    has_params = False
    for param in model.parameters():
        has_params = True
        if not param.data.is_cuda:
            on_gpu = False
    return has_params and on_gpu
Returns True if sample is not None and its constituents are not None.
def is_not_none(sample: Any) -> bool:
    """
    Returns True if sample is not None and its constituents are not None.
    """
    if sample is None:
        return False

    if isinstance(sample, (list, tuple)):
        if any(s is None for s in sample):
            return False

    if isinstance(sample, dict):
        if any(s is None for s in sample.values()):
            return False
    return True
Copies a model and (optional) loss to GPU and enables cudnn benchmarking. For multi-GPU training, wrap the returned model in DistributedDataParallel for distributed training.
def copy_model_to_gpu(model, loss=None):
    """
    Copies a model and (optional) loss to GPU and enables cudnn benchmarking.

    For multi-GPU training, wrap the returned model in DistributedDataParallel
    for distributed training.
    """
    if not torch.backends.cudnn.deterministic:
        torch.backends.cudnn.benchmark = True
    model = model.cuda()

    if loss is not None:
        loss = loss.cuda()
        return model, loss
    else:
        return model
Recursively searches lists, tuples, dicts and copies any object which supports an object.to API (e.g. tensors) to device if possible. Other values are passed as-is in the result. Note: These are all copies, so if there are two objects that reference the same object, then after this call, there will be two different objects referenced on the device.
def recursive_copy_to_device(
    value: Any, non_blocking: bool, device: torch.device
) -> Any:
    """
    Recursively searches lists, tuples, dicts and copies any object which supports
    an object.to API (e.g. tensors) to device if possible. Other values are passed
    as-is in the result.

    Note: These are all copies, so if there are two objects that reference the same
    object, then after this call, there will be two different objects referenced on
    the device.
    """
    if isinstance(value, list) or isinstance(value, tuple):
        device_val = []
        for val in value:
            device_val.append(
                recursive_copy_to_device(val, non_blocking=non_blocking, device=device)
            )
        return device_val if isinstance(value, list) else tuple(device_val)
    elif isinstance(value, collections.abc.Mapping):
        device_val = {}
        for key, val in value.items():
            device_val[key] = recursive_copy_to_device(
                val, non_blocking=non_blocking, device=device
            )
        return device_val
    elif callable(getattr(value, "to", None)):
        return value.to(device=device, non_blocking=non_blocking)
    return value
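A short sketch of what this enables: a nested batch structure (the keys here are hypothetical) can be moved to a device in one call, with non-tensor values passed through unchanged.

import torch

batch = {
    "input": torch.randn(4, 3, 224, 224),
    "target": torch.tensor([0, 1, 2, 3]),
    "meta": {"ids": [7, 8, 9, 10]},  # plain ints have no .to(); returned as-is
}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch = recursive_copy_to_device(batch, non_blocking=True, device=device)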
Recursively searches lists, tuples, dicts and copies tensors to GPU if possible. Non-tensor values are passed as-is in the result. Note: These are all copies, so if there are two objects that reference the same object, then after this call, there will be two different objects referenced on the GPU.
def recursive_copy_to_gpu(value: Any, non_blocking: bool = True) -> Any:
    """
    Recursively searches lists, tuples, dicts and copies tensors to GPU if possible.
    Non-tensor values are passed as-is in the result.

    Note: These are all copies, so if there are two objects that reference the same
    object, then after this call, there will be two different objects referenced on
    the GPU.
    """
    return recursive_copy_to_device(
        value=value, non_blocking=non_blocking, device=GPU_DEVICE
    )
Context manager which seeds the NumPy PRNG with the specified seed and restores the state afterward
@contextlib.contextmanager  # required to turn this generator into a context manager
def numpy_seed(seed: Optional[int], *addl_seeds: int) -> None:
    """Context manager which seeds the NumPy PRNG with the specified seed and
    restores the state afterward"""
    if seed is None:
        yield
        return
    if len(addl_seeds) > 0:
        seed = int(hash((seed, *addl_seeds)) % 1e6)
    state = np.random.get_state()
    np.random.seed(seed)
    try:
        yield
    finally:
        np.random.set_state(state)
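Usage is the standard with-statement pattern; a small sketch:

import numpy as np

with numpy_seed(42):
    a = np.random.rand(3)  # deterministic under seed 42
with numpy_seed(42):
    b = np.random.rand(3)
assert np.allclose(a, b)  # same seed, same draws; global RNG state restored after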
Loads a checkpoint on primary and broadcasts it to all replicas. This is a collective operation which needs to be run in sync on all replicas. See :func:`load_checkpoint` for the arguments.
def load_and_broadcast_checkpoint(
    checkpoint_path: str, device: torch.device = CPU_DEVICE
) -> Optional[Dict]:
    """Loads a checkpoint on primary and broadcasts it to all replicas.

    This is a collective operation which needs to be run in sync on all replicas.

    See :func:`load_checkpoint` for the arguments.
    """
    if is_primary():
        checkpoint = load_checkpoint(checkpoint_path, device)
    else:
        checkpoint = None
    logging.info(f"Broadcasting checkpoint loaded from {checkpoint_path}")
    return broadcast_object(checkpoint)
Loads a checkpoint from the specified checkpoint path.

Args:
    checkpoint_path: The path to load the checkpoint from. Can be a file or a
        directory. If it is a directory, the checkpoint is loaded from
        :py:data:`CHECKPOINT_FILE` inside the directory.
    device: device to load the checkpoint to

Returns:
    The checkpoint, if it exists, or None.
def load_checkpoint(
    checkpoint_path: str, device: torch.device = CPU_DEVICE
) -> Optional[Dict]:
    """Loads a checkpoint from the specified checkpoint path.

    Args:
        checkpoint_path: The path to load the checkpoint from. Can be a file or a
            directory. If it is a directory, the checkpoint is loaded from
            :py:data:`CHECKPOINT_FILE` inside the directory.
        device: device to load the checkpoint to

    Returns:
        The checkpoint, if it exists, or None.
    """
    if not checkpoint_path:
        return None

    assert device is not None, "Please specify what device to load checkpoint on"
    assert device.type in ["cpu", "cuda"], f"Unknown device: {device}"
    if device.type == "cuda":
        assert torch.cuda.is_available()

    if not PathManager.exists(checkpoint_path):
        logging.warning(f"Checkpoint path {checkpoint_path} not found")
        return None
    if PathManager.isdir(checkpoint_path):
        checkpoint_path = f"{checkpoint_path.rstrip('/')}/{CHECKPOINT_FILE}"

    if not PathManager.exists(checkpoint_path):
        logging.warning(f"Checkpoint file {checkpoint_path} not found.")
        return None

    logging.info(f"Attempting to load checkpoint from {checkpoint_path}")
    # load model on specified device and not on saved device for model and return
    # the checkpoint
    with PathManager.open(checkpoint_path, "rb") as f:
        checkpoint = torch.load(f, map_location=device)
    logging.info(f"Loaded checkpoint from {checkpoint_path}")
    return checkpoint
Updates the model with the provided model state dictionary.

Args:
    model: ClassyVisionModel instance to update
    model_state_dict: State dict, should be the output of a call to
        ClassyVisionModel.get_classy_state().
    reset_heads: if False, uses the heads' state from model_state_dict.
    strict: if True, strictly match the module/buffer keys in current model and
        pass-in model_state_dict
def update_classy_model(
    model, model_state_dict: Dict, reset_heads: bool, strict: bool = True
) -> bool:
    """
    Updates the model with the provided model state dictionary.

    Args:
        model: ClassyVisionModel instance to update
        model_state_dict: State dict, should be the output of a call to
            ClassyVisionModel.get_classy_state().
        reset_heads: if False, uses the heads' state from model_state_dict.
        strict: if True, strictly match the module/buffer keys in current model
            and pass-in model_state_dict
    """
    try:
        if reset_heads:
            current_model_state_dict = model.get_classy_state()
            # replace the checkpointed head states with source head states
            model_state_dict["model"]["heads"] = current_model_state_dict["model"][
                "heads"
            ]
        model.set_classy_state(model_state_dict, strict=strict)
        logging.info("Model state load successful")
        return True
    except Exception:
        logging.exception("Could not load the model state")
        return False
Updates the task with the provided task dictionary.

Args:
    task: ClassyTask instance to update
    state_dict: State dict, should be the output of a call to
        ClassyTask.get_classy_state().
def update_classy_state(task, state_dict: Dict) -> bool:
    """
    Updates the task with the provided task dictionary.

    Args:
        task: ClassyTask instance to update
        state_dict: State dict, should be the output of a call to
            ClassyTask.get_classy_state().
    """
    logging.info("Loading classy state from checkpoint")

    try:
        task.set_classy_state(state_dict)
        logging.info("Checkpoint load successful")
        return True
    except Exception:
        logging.exception("Could not load the checkpoint")
        return False
Saves a state variable to the specified checkpoint folder. Returns the filename of the checkpoint if successful. Raises an exception otherwise.
def save_checkpoint(checkpoint_folder, state, checkpoint_file=CHECKPOINT_FILE):
    """
    Saves a state variable to the specified checkpoint folder. Returns the
    filename of the checkpoint if successful. Raises an exception otherwise.
    """
    # make sure that we have a checkpoint folder:
    if not PathManager.isdir(checkpoint_folder):
        try:
            PathManager.mkdirs(checkpoint_folder)
        except BaseException:
            logging.warning("Could not create folder %s." % checkpoint_folder)
            raise

    # write checkpoint atomically:
    try:
        full_filename = f"{checkpoint_folder}/{checkpoint_file}"
        with PathManager.open(full_filename, "wb") as f:
            torch.save(state, f)
        return full_filename
    except BaseException:
        logging.warning(
            "Unable to write checkpoint to %s." % checkpoint_folder, exc_info=True
        )
        raise
Flattens nested dict into (key, val) dict. Used for flattening meters structure, so that they can be visualized.
def flatten_dict(value_dict: Dict, prefix="", sep="_") -> Dict:
    """
    Flattens nested dict into (key, val) dict. Used for flattening meters
    structure, so that they can be visualized.
    """
    items = []
    for k, v in value_dict.items():
        key = prefix + sep + k if prefix else k
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten_dict(value_dict=v, prefix=key, sep=sep).items())
        else:
            items.append((key, v))
    return dict(items)
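For example, a nested meter structure collapses to flat keys joined by the separator; a sketch with made-up meter values:

meters = {"accuracy": {"top_1": 0.76, "top_5": 0.93}, "loss": 1.2}
assert flatten_dict(meters) == {
    "accuracy_top_1": 0.76,
    "accuracy_top_5": 0.93,
    "loss": 1.2,
}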
Loads a json config from a file.
def load_json(json_path):
    """
    Loads a json config from a file.
    """
    assert os.path.exists(json_path), "Json file %s not found" % json_path
    with open(json_path) as json_file:
        json_config = json_file.read()
    try:
        config = json.loads(json_config)
    except BaseException as err:
        raise Exception("Failed to validate config with error: %s" % str(err))
    return config
Context manager which seeds the PyTorch PRNG with the specified seed and restores the state afterward. Setting seed to None is equivalent to running the code without the context manager.
@contextlib.contextmanager  # required to turn this generator into a context manager
def torch_seed(seed: Optional[int]):
    """Context manager which seeds the PyTorch PRNG with the specified seed and
    restores the state afterward. Setting seed to None is equivalent to running
    the code without the context manager."""
    if seed is None:
        yield
        return

    state = torch.get_rng_state()
    torch.manual_seed(seed)
    try:
        yield
    finally:
        torch.set_rng_state(state)
This function converts target class indices to one-hot vectors, given the number of classes.
def convert_to_one_hot(targets: torch.Tensor, classes) -> torch.Tensor:
    """
    This function converts target class indices to one-hot vectors, given the
    number of classes.
    """
    assert (
        torch.max(targets).item() < classes
    ), "Class Index must be less than number of classes"
    # targets are expected to have shape (N, 1); see maybe_convert_to_one_hot
    one_hot_targets = torch.zeros(
        (targets.shape[0], classes), dtype=torch.long, device=targets.device
    )
    one_hot_targets.scatter_(1, targets.long(), 1)
    return one_hot_targets
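A quick worked example (sketch); indices must arrive with shape (N, 1), which maybe_convert_to_one_hot below arranges via view(-1, 1):

import torch

targets = torch.tensor([[0], [2], [1]])
one_hot = convert_to_one_hot(targets, classes=3)
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 0]])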
This function infers whether target is integer or 0/1 encoded and converts it to 0/1 encoding if necessary.
def maybe_convert_to_one_hot(
    target: torch.Tensor, model_output: torch.Tensor
) -> torch.Tensor:
    """
    This function infers whether target is integer or 0/1 encoded and converts
    it to 0/1 encoding if necessary.
    """
    target_shape_list = list(target.size())

    if len(target_shape_list) == 1 or (
        len(target_shape_list) == 2 and target_shape_list[1] == 1
    ):
        target = convert_to_one_hot(target.view(-1, 1), model_output.shape[1])

    # targets are not necessarily hard 0/1 encoding. They can be soft
    # (i.e. fractional) in some cases, such as mixup labels
    assert (
        target.shape == model_output.shape
    ), "Target must be of the same shape as model_output."

    return target
Some layers may take a tuple/list/dict/list[dict] as input in their forward function. We recursively dive into the tuple/list until we meet a tensor, then infer the batch size.
def get_batchsize_per_replica(x: Union[Tuple, List, Dict]) -> int:
    """
    Some layers may take a tuple/list/dict/list[dict] as input in their forward
    function. We recursively dive into the tuple/list until we meet a tensor, then
    infer the batch size.
    """
    while isinstance(x, (list, tuple)):
        assert len(x) > 0, "input x of tuple/list type must have at least one element"
        x = x[0]
    if isinstance(x, (dict,)):
        # dim zero is always the batch size; select an arbitrary key.
        key_list = list(x.keys())
        x = x[key_list[0]]
    return x.size()[0]
Finds the set of BatchNorm parameters in the model. Recursively traverses all parameters in the given model and returns a tuple of lists: the first element is the set of batchnorm parameters, the second list contains all other parameters of the model.
def split_batchnorm_params(model: nn.Module):
    """Finds the set of BatchNorm parameters in the model.

    Recursively traverses all parameters in the given model and returns a tuple
    of lists: the first element is the set of batchnorm parameters, the second
    list contains all other parameters of the model."""
    batchnorm_params = []
    other_params = []
    for module in model.modules():
        # If module has children (i.e. internal node of constructed DAG) then
        # only add direct parameters() to the list of params, else go over
        # children node to find if they are BatchNorm or have "bias".
        if list(module.children()) != []:
            for params in module.parameters(recurse=False):
                if params.requires_grad:
                    other_params.append(params)
        elif isinstance(module, nn.modules.batchnorm._BatchNorm):
            for params in module.parameters():
                if params.requires_grad:
                    batchnorm_params.append(params)
        else:
            for params in module.parameters():
                if params.requires_grad:
                    other_params.append(params)
    return batchnorm_params, other_params
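The typical motivation is to exempt BatchNorm parameters from weight decay; a sketch of how the split might be consumed (the model and hyperparameters are illustrative):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
bn_params, other_params = split_batchnorm_params(model)

optimizer = torch.optim.SGD(
    [
        {"params": other_params, "weight_decay": 1e-4},
        {"params": bn_params, "weight_decay": 0.0},  # no decay on BN scale/shift
    ],
    lr=0.1,
    momentum=0.9,
)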
Context manager which sets the train mode of a model. After returning, it restores the state of every sub-module individually.
@contextlib.contextmanager  # required to turn this generator into a context manager
def _train_mode(model: nn.Module, train_mode: bool):
    """Context manager which sets the train mode of a model. After returning, it
    restores the state of every sub-module individually."""
    train_modes = {}
    for name, module in model.named_modules():
        train_modes[name] = module.training
    try:
        model.train(train_mode)
        yield
    finally:
        for name, module in model.named_modules():
            module.training = train_modes[name]
This function is used to log the usage of different Classy components.
def log_class_usage(component_type, klass):
    """This function is used to log the usage of different Classy components."""
    identifier = "ClassyVision"
    if klass and hasattr(klass, "__name__"):
        identifier += f".{component_type}.{klass.__name__}"
    torch._C._log_api_usage_once(identifier)
Get the torch version as [major, minor]. All comparisons must be done with the two version values. Revisions are not supported.
def get_torch_version():
    """Get the torch version as [major, minor].

    All comparisons must be done with the two version values. Revisions are not
    supported.
    """
    version_list = torch.__version__.split(".")[:2]
    return [int(version_str) for version_str in version_list]
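Because the result is a [major, minor] list of ints, version gates are plain list comparisons; a sketch:

# list comparison is element-wise, so [1, 10] > [1, 8] works correctly,
# whereas comparing the strings "1.10" > "1.8" would not
if get_torch_version() >= [1, 8]:
    print("torch 1.8 or newer")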
Generator to iterate over all parameters in the optimizer param_groups. When apex is available, uses that to guarantee we get the FP32 copy of the parameters when O2 is enabled. Otherwise, iterate ourselves.
def master_params(optimizer):
    """Generator to iterate over all parameters in the optimizer param_groups.

    When apex is available, uses that to guarantee we get the FP32 copy of the
    parameters when O2 is enabled. Otherwise, iterate ourselves."""
    if apex_available:
        yield from apex.amp.master_params(optimizer)
    else:
        for group in optimizer.param_groups:
            for p in group["params"]:
                yield p
Connects to a visdom server if not currently connected.
def visdom_connect(server: Optional[str] = None, port: Optional[int] = None) -> None:
    """Connects to a visdom server if not currently connected."""
    if not visdom_connected():
        vis.append(visdom.Visdom(server=server, port=port))
Returns True if the client is connected to a visdom server.
def visdom_connected() -> bool:
    """Returns True if the client is connected to a visdom server."""
    return (
        len(vis) > 0
        and hasattr(vis[-1], "check_connection")
        and vis[-1].check_connection()
    )
Plots the specified dict of learning curves in visdom. Optionally, the environment, window handle, and title for the visdom plot can be specified.
def plot_learning_curves(
    curves: Dict[str, List],
    visdom_server: Optional["visdom.Visdom"] = None,
    env: Optional[str] = None,
    win: Optional[str] = None,
    title: str = "",
) -> Any:
    """Plots the specified dict of learning curves in visdom. Optionally, the
    environment, window handle, and title for the visdom plot can be specified.
    """
    if visdom_server is None and visdom_connected():
        visdom_server = vis[-1]

    # return if we are not connected to visdom server:
    if not visdom_server or not visdom_server.check_connection():
        print("WARNING: Not connected to visdom. Skipping plotting.")
        return

    # assertions:
    assert type(curves) == dict
    assert all(type(curve) == list for _, curve in curves.items())

    # remove batch time curves:
    _curves = {k: curves[k] for k in curves.keys() if "batch time" not in k}

    # show plot:
    X = torch.stack([torch.FloatTensor(curve) for _, curve in _curves.items()], dim=1)
    Y = torch.arange(0, X.size(0))
    Y = Y.view(Y.numel(), 1).expand(Y.numel(), X.size(1))
    opts = {"title": title, "legend": list(_curves.keys()), "xlabel": "Epochs"}
    return visdom_server.line(X, Y, env=env, win=win, opts=opts)
Constructs a plot of specified losses as function of y * f(x). The losses are a list of nn.Module losses. Optionally, the environment, window handle, and title for the visdom plot can be specified.
def plot_losses(
    losses: Union[nn.Module, List[nn.Module]],
    visdom_server: Optional["visdom.Visdom"] = None,
    env: Optional[str] = None,
    win: Optional[str] = None,
    title: str = "",
) -> Any:
    """Constructs a plot of specified losses as function of y * f(x). The losses
    are a list of nn.Module losses. Optionally, the environment, window handle,
    and title for the visdom plot can be specified.
    """
    if visdom_server is None and visdom_connected():
        visdom_server = vis[-1]

    # return if we are not connected to visdom server:
    if not visdom_server or not visdom_server.check_connection():
        print("WARNING: Not connected to visdom. Skipping plotting.")
        return

    # assertions:
    if isinstance(losses, nn.Module):
        losses = [losses]
    assert type(losses) == list
    assert all(isinstance(loss, nn.Module) for loss in losses)
    if any(isinstance(loss, UNSUPPORTED_LOSSES) for loss in losses):
        raise NotImplementedError("loss function not supported")

    # loop over all loss functions:
    for idx, loss in enumerate(losses):
        # construct scores and targets:
        score = torch.arange(-5.0, 5.0, 0.005)
        if idx == 0:
            loss_val = torch.FloatTensor(score.size(0), len(losses))
        if isinstance(loss, REGRESSION_LOSSES):
            target = torch.FloatTensor(score.size()).fill_(0.0)
        else:
            target = torch.LongTensor(score.size()).fill_(1)

        # compute loss values:
        for n in range(0, score.nelement()):
            loss_val[n][idx] = loss(
                score.narrow(0, n, 1), target.narrow(0, n, 1)
            ).item()

    # show plot:
    title = str(loss) if title == "" else title
    legend = [str(loss) for loss in losses]
    opts = {"title": title, "xlabel": "Score", "ylabel": "Loss", "legend": legend}
    win = visdom_server.line(loss_val, score, env=env, win=win, opts=opts)
    return win
Visualizes a model in TensorBoard.

The TensorBoard writer can be either specified directly via `writer` or can be specified via a `folder`. The model can be run in training or evaluation mode via the `train` argument.

Example usage on devserver:
- Install TensorBoard using: `sudo feature install tensorboard`
- Start TensorBoard using: `tensorboard --port=8098 --logdir <folder>`
def plot_model(
    model: ClassyModel,
    size: Tuple[int, ...] = (3, 224, 224),
    input_key: Optional[Union[str, List[str]]] = None,
    writer: Optional["SummaryWriter"] = None,
    folder: str = "",
    train: bool = False,
) -> None:
    """Visualizes a model in TensorBoard.

    The TensorBoard writer can be either specified directly via `writer` or can
    be specified via a `folder`.

    The model can be run in training or evaluation mode via the `train` argument.

    Example usage on devserver:
    - Install TensorBoard using: `sudo feature install tensorboard`
    - Start TensorBoard using: `tensorboard --port=8098 --logdir <folder>`
    """
    assert (
        writer is not None or folder != ""
    ), "must specify SummaryWriter or folder to create SummaryWriter in"
    input = get_model_dummy_input(model, size, input_key)
    if writer is None:
        writer = SummaryWriter(log_dir=folder, comment="Model graph")
    with torch.no_grad():
        orig_train = model.training
        model.train(train)
        writer.add_graph(model, input_to_model=(input,))
        model.train(orig_train)
        writer.flush()
Constructs a 2D map of images. The 2D coordinates for each of the images are specified in `mapcoord`, the corresponding images are in `dataset`. Optional arguments set the size of the map images, the size of the images themselves, the unnormalization transform, and whether or not to snap images to a grid.
def image_map(
    mapcoord: Union[np.ndarray, torch.Tensor],
    dataset: Union[
        torch.utils.data.dataloader.DataLoader, torch.utils.data.dataset.Dataset
    ],
    mapsize: int = 5000,
    imsize: int = 32,
    unnormalize: Optional[Callable] = None,
    snap_to_grid: bool = False,
) -> torch.ByteTensor:
    """Constructs a 2D map of images.

    The 2D coordinates for each of the images are specified in `mapcoord`, the
    corresponding images are in `dataset`. Optional arguments set the size of the
    map images, the size of the images themselves, the unnormalization transform,
    and whether or not to snap images to a grid.
    """
    # assertions:
    if type(mapcoord) == np.ndarray:
        mapcoord = torch.from_numpy(mapcoord)
    assert torch.is_tensor(mapcoord)
    if isinstance(dataset, torch.utils.data.dataloader.DataLoader):
        dataset = dataset.dataset
    assert isinstance(dataset, torch.utils.data.dataset.Dataset)
    assert is_pos_int(mapsize)
    assert is_pos_int(imsize)
    if unnormalize is not None:
        assert callable(unnormalize)

    # initialize some variables:
    import torchvision.transforms.functional as F

    background = 255
    mapim = torch.ByteTensor(3, mapsize, mapsize).fill_(background)

    # normalize map coordinates:
    mapc = mapcoord.add(-(mapcoord.min()))
    mapc.div_(mapc.max())

    # loop over images:
    for idx in range(len(dataset)):
        # compute grid location:
        if snap_to_grid:
            y = 1 + int(math.floor(mapc[idx][0] * (mapsize - imsize - 2)))
            x = 1 + int(math.floor(mapc[idx][1] * (mapsize - imsize - 2)))
        else:
            y = 1 + int(math.floor(mapc[idx][0] * (math.floor(mapsize - imsize) - 2)))
            x = 1 + int(math.floor(mapc[idx][1] * (math.floor(mapsize - imsize) - 2)))

        # check whether we can overwrite this location:
        overwrite = not snap_to_grid
        if not overwrite:
            segment = mapim.narrow(1, y, imsize).narrow(2, x, imsize)
            overwrite = segment.eq(background).all()

        # draw image:
        if overwrite:
            # load, unnormalize, and resize image:
            image = dataset[idx][0]
            if unnormalize is not None:
                image = unnormalize(image)
            resized_im = F.to_tensor(
                F.resize(F.to_pil_image(image), imsize, Image.BILINEAR)
            )

            # place image:
            segment = mapim.narrow(1, y, imsize).narrow(2, x, imsize)
            segment.copy_(resized_im.mul_(255.0).byte())

    # return map:
    return mapim
Registers a ClassyHead subclass.

This decorator allows Classy Vision to instantiate a subclass of ClassyHead from a configuration file, even if the class itself is not part of the Classy Vision framework. To use it, apply this decorator to a ClassyHead subclass, like this:

.. code-block:: python

    @register_head("my_head")
    class MyHead(ClassyHead):
        ...

To instantiate a head from a configuration file, see :func:`build_head`.
def register_head(name, bypass_checks=False):
    """Registers a ClassyHead subclass.

    This decorator allows Classy Vision to instantiate a subclass of ClassyHead
    from a configuration file, even if the class itself is not part of the Classy
    Vision framework. To use it, apply this decorator to a ClassyHead subclass,
    like this:

    .. code-block:: python

        @register_head("my_head")
        class MyHead(ClassyHead):
            ...

    To instantiate a head from a configuration file, see :func:`build_head`."""

    def register_head_cls(cls):
        if not bypass_checks:
            if name in HEAD_REGISTRY:
                msg = (
                    "Cannot register duplicate head ({}). Already registered at \n{}\n"
                )
                raise ValueError(msg.format(name, HEAD_REGISTRY_TB[name]))
            if not issubclass(cls, ClassyHead):
                raise ValueError(
                    "Head ({}: {}) must extend ClassyHead".format(name, cls.__name__)
                )
            if cls.__name__ in HEAD_CLASS_NAMES:
                msg = (
                    "Cannot register head with duplicate class name({})."
                    + "Previously registered at \n{}\n"
                )
                raise ValueError(
                    msg.format(cls.__name__, HEAD_CLASS_NAMES_TB[cls.__name__])
                )
        tb = "".join(traceback.format_stack())
        HEAD_REGISTRY[name] = cls
        HEAD_CLASS_NAMES.add(cls.__name__)
        HEAD_REGISTRY_TB[name] = tb
        HEAD_CLASS_NAMES_TB[cls.__name__] = tb
        return cls

    return register_head_cls
Builds a ClassyHead from a config. This assumes a 'name' key in the config which is used to determine what head class to instantiate. For instance, a config `{"name": "my_head", "foo": "bar"}` will find a class that was registered as "my_head" (see :func:`register_head`) and call .from_config on it.
def build_head(config):
    """Builds a ClassyHead from a config.

    This assumes a 'name' key in the config which is used to determine what head
    class to instantiate. For instance, a config `{"name": "my_head", "foo": "bar"}`
    will find a class that was registered as "my_head" (see :func:`register_head`)
    and call .from_config on it."""
    assert "name" in config, "Expect name in config"
    assert "unique_id" in config, "Expect a global unique id in config"
    assert config["name"] in HEAD_REGISTRY, "unknown head {}".format(config["name"])
    name = config["name"]
    head_config = copy.deepcopy(config)
    del head_config["name"]
    return HEAD_REGISTRY[name].from_config(head_config)
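Putting the two together, a sketch of the registration round-trip (the head name, config keys, and from_config body are made up for illustration; assumes classy_vision.heads exports these names):

from classy_vision.heads import ClassyHead, build_head, register_head

@register_head("my_head")
class MyHead(ClassyHead):
    @classmethod
    def from_config(cls, config):
        # "name" has already been stripped; "unique_id" is required by build_head
        return cls(config["unique_id"])

head = build_head({"name": "my_head", "unique_id": "default_head"})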
Registers a :class:`ClassyHook` subclass.

This decorator allows Classy Vision to instantiate a subclass of :class:`ClassyHook` from a configuration file, even if the class itself is not part of the base Classy Vision framework. To use it, apply this decorator to a ClassyHook subclass, like this:

.. code-block:: python

    @register_hook('custom_hook')
    class CustomHook(ClassyHook):
        ...

To instantiate a hook from a configuration file, see :func:`build_hook`.
def register_hook(name, bypass_checks=False):
    """Registers a :class:`ClassyHook` subclass.

    This decorator allows Classy Vision to instantiate a subclass of
    :class:`ClassyHook` from a configuration file, even if the class itself is
    not part of the base Classy Vision framework. To use it, apply this decorator
    to a ClassyHook subclass, like this:

    .. code-block:: python

        @register_hook('custom_hook')
        class CustomHook(ClassyHook):
            ...

    To instantiate a hook from a configuration file, see :func:`build_hook`.
    """

    def register_hook_cls(cls):
        if not bypass_checks:
            if name in HOOK_REGISTRY:
                msg = (
                    "Cannot register duplicate hook ({}). Already registered at \n{}\n"
                )
                raise ValueError(msg.format(name, HOOK_REGISTRY_TB[name]))
            if not issubclass(cls, ClassyHook):
                raise ValueError(
                    "Hook ({}: {}) must extend ClassyHook".format(name, cls.__name__)
                )
            if cls.__name__ in HOOK_CLASS_NAMES:
                msg = (
                    "Cannot register hook with duplicate class name({})."
                    + "Previously registered at \n{}\n"
                )
                raise ValueError(
                    msg.format(cls.__name__, HOOK_CLASS_NAMES_TB[cls.__name__])
                )
        tb = "".join(traceback.format_stack())
        HOOK_REGISTRY[name] = cls
        HOOK_CLASS_NAMES.add(cls.__name__)
        HOOK_REGISTRY_TB[name] = tb
        HOOK_CLASS_NAMES_TB[cls.__name__] = tb
        return cls

    return register_hook_cls
Builds a ClassyHook from a config. This assumes a 'name' key in the config which is used to determine what hook class to instantiate. For instance, a config `{"name": "my_hook", "foo": "bar"}` will find a class that was registered as "my_hook" (see :func:`register_hook`) and call .from_config on it.
def build_hook(hook_config: Dict[str, Any]):
    """Builds a ClassyHook from a config.

    This assumes a 'name' key in the config which is used to determine what hook
    class to instantiate. For instance, a config `{"name": "my_hook", "foo": "bar"}`
    will find a class that was registered as "my_hook" (see :func:`register_hook`)
    and call .from_config on it."""
    assert hook_config["name"] in HOOK_REGISTRY, (
        "Unregistered hook. Did you make sure to use the register_hook decorator "
        "AND import the hook file before calling this function??"
    )
    hook_config = copy.deepcopy(hook_config)
    hook_name = hook_config.pop("name")
    return HOOK_REGISTRY[hook_name].from_config(hook_config)
Builds a ClassyLoss from a config.

This assumes a 'name' key in the config which is used to determine what model class to instantiate. For instance, a config `{"name": "my_loss", "foo": "bar"}` will find a class that was registered as "my_loss" (see :func:`register_loss`) and call .from_config on it.

In addition to losses registered with :func:`register_loss`, we also support instantiating losses available in the `torch.nn.modules.loss <https://pytorch.org/docs/stable/nn.html#loss-functions>`_ module. Any keys in the config will get expanded to parameters of the loss constructor. For instance, the following call will instantiate a `torch.nn.modules.CrossEntropyLoss <https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss>`_:

.. code-block:: python

    build_loss({"name": "CrossEntropyLoss", "reduction": "sum"})
def build_loss(config):
    """Builds a ClassyLoss from a config.

    This assumes a 'name' key in the config which is used to determine what model
    class to instantiate. For instance, a config `{"name": "my_loss", "foo": "bar"}`
    will find a class that was registered as "my_loss" (see :func:`register_loss`)
    and call .from_config on it.

    In addition to losses registered with :func:`register_loss`, we also support
    instantiating losses available in the `torch.nn.modules.loss
    <https://pytorch.org/docs/stable/nn.html#loss-functions>`_ module. Any keys in
    the config will get expanded to parameters of the loss constructor. For
    instance, the following call will instantiate a `torch.nn.modules.CrossEntropyLoss
    <https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss>`_:

    .. code-block:: python

        build_loss({"name": "CrossEntropyLoss", "reduction": "sum"})
    """
    assert "name" in config, f"name not provided for loss: {config}"
    name = config["name"]

    args = copy.deepcopy(config)
    del args["name"]
    if "weight" in args and args["weight"] is not None:
        # if we are passing weights, we need to change the weights from a list
        # to a tensor
        args["weight"] = torch.tensor(args["weight"], dtype=torch.float)
    if name in LOSS_REGISTRY:
        loss = LOSS_REGISTRY[name].from_config(config)
    else:
        # the name should be available in torch.nn.modules.loss
        assert hasattr(torch_losses, name), (
            f"{name} isn't a registered loss"
            ", nor is it available in torch.nn.modules.loss"
        )
        loss = getattr(torch_losses, name)(**args)
    log_class_usage("Loss", loss.__class__)
    return loss
Registers a ClassyLoss subclass.

This decorator allows Classy Vision to instantiate a subclass of ClassyLoss from a configuration file, even if the class itself is not part of the Classy Vision framework. To use it, apply this decorator to a ClassyLoss subclass, like this:

.. code-block:: python

    @register_loss("my_loss")
    class MyLoss(ClassyLoss):
        ...

To instantiate a loss from a configuration file, see :func:`build_loss`.
def register_loss(name, bypass_checks=False):
    """Registers a ClassyLoss subclass.

    This decorator allows Classy Vision to instantiate a subclass of ClassyLoss
    from a configuration file, even if the class itself is not part of the Classy
    Vision framework. To use it, apply this decorator to a ClassyLoss subclass,
    like this:

    .. code-block:: python

        @register_loss("my_loss")
        class MyLoss(ClassyLoss):
            ...

    To instantiate a loss from a configuration file, see :func:`build_loss`."""

    def register_loss_cls(cls):
        if not bypass_checks:
            if name in LOSS_REGISTRY:
                msg = (
                    "Cannot register duplicate loss ({}). Already registered at \n{}\n"
                )
                raise ValueError(msg.format(name, LOSS_REGISTRY_TB[name]))
            if not issubclass(cls, ClassyLoss):
                raise ValueError(
                    "Loss ({}: {}) must extend ClassyLoss".format(name, cls.__name__)
                )
        tb = "".join(traceback.format_stack())
        LOSS_REGISTRY[name] = cls
        LOSS_CLASS_NAMES.add(cls.__name__)
        LOSS_REGISTRY_TB[name] = tb
        LOSS_CLASS_NAMES_TB[cls.__name__] = tb
        return cls

    return register_loss_cls
Builds a :class:`ClassyMeter` from a config. This assumes a 'name' key in the config which is used to determine what meter class to instantiate. For instance, a config `{"name": "my_meter", "foo": "bar"}` will find a class that was registered as "my_meter" (see :func:`register_meter`) and call .from_config on it.
def build_meter(config): """Builds a :class:`ClassyMeter` from a config. This assumes a 'name' key in the config which is used to determine what meter class to instantiate. For instance, a config `{"name": "my_meter", "foo": "bar"}` will find a class that was registered as "my_meter" (see :func:`register_meter`) and call .from_config on it.""" return METER_REGISTRY[config["name"]].from_config(config)
Registers a :class:`ClassyMeter` subclass. This decorator allows Classy Vision to instantiate a subclass of ClassyMeter from a configuration file, even if the class itself is not part of the Classy Vision framework. To use it, apply this decorator to a ClassyMeter subclass, like this: .. code-block:: python @register_meter('accuracy') class AccuracyMeter(ClassyMeter): ... To instantiate a meter from a configuration file, see :func:`build_meter`.
def register_meter(name, bypass_checks=False):
    """Registers a :class:`ClassyMeter` subclass.

    This decorator allows Classy Vision to instantiate a subclass of
    ClassyMeter from a configuration file, even if the class itself is not
    part of the Classy Vision framework. To use it, apply this decorator to
    a ClassyMeter subclass, like this:

    .. code-block:: python

      @register_meter('accuracy')
      class AccuracyMeter(ClassyMeter):
          ...

    To instantiate a meter from a configuration file, see
    :func:`build_meter`."""

    def register_meter_cls(cls):
        if not bypass_checks:
            if name in METER_REGISTRY:
                msg = (
                    "Cannot register duplicate meter ({}). Already registered at \n{}\n"
                )
                raise ValueError(msg.format(name, METER_REGISTRY_TB[name]))
            if not issubclass(cls, ClassyMeter):
                raise ValueError(
                    "Meter ({}: {}) must extend ClassyMeter".format(
                        name, cls.__name__
                    )
                )
        tb = "".join(traceback.format_stack())
        METER_REGISTRY[name] = cls
        METER_REGISTRY_TB[name] = tb
        return cls

    return register_meter_cls
Swish activation function.
def swish(x): """ Swish activation function. """ return x * torch.sigmoid(x)
Apply drop connect to a batch: each sample is zeroed with probability `drop_connect_rate`, and the survivors are rescaled so the expected output is unchanged.
def drop_connect(inputs, is_training, drop_connect_rate):
    """
    Apply drop connect to a batch: each sample is zeroed with probability
    `drop_connect_rate`, and the survivors are rescaled by
    `1 / (1 - drop_connect_rate)` so the expected output is unchanged.
    """
    if not is_training:
        return inputs
    keep_prob = 1 - drop_connect_rate

    # compute drop connect tensor
    batch_size = inputs.shape[0]
    random_tensor = keep_prob
    random_tensor += torch.rand(
        [batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device
    )
    binary_tensor = torch.floor(random_tensor)
    outputs = (inputs / keep_prob) * binary_tensor
    return outputs
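An illustrative sanity check (not from the source): the floor of `keep_prob + U[0, 1)` is 1 with probability `keep_prob`, so roughly `keep_prob` of the samples survive, and the `1 / keep_prob` rescaling keeps the expected output equal to the input.

import torch

torch.manual_seed(0)
x = torch.ones(10000, 1, 1, 1)
y = drop_connect(x, is_training=True, drop_connect_rate=0.2)
print((y != 0).float().mean())  # ~0.8 of samples survive
print(y.mean())                 # ~1.0 in expectation
assert torch.equal(drop_connect(x, False, 0.2), x)  # no-op at eval time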
Calculates the scaled number of filters based on the width coefficient and rounds the result by the width divisor.
def scale_width(num_filters, width_coefficient, width_divisor, min_width): """ Calculates the scaled number of filters based on the width coefficient and rounds the result by the width divisor. """ if not width_coefficient: return num_filters num_filters *= width_coefficient min_width = min_width or width_divisor new_filters = max( min_width, (int(num_filters + width_divisor / 2) // width_divisor) * width_divisor, ) # Do not round down by more than 10% if new_filters < 0.9 * num_filters: new_filters += width_divisor return int(new_filters)
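A few hand-computed examples of the rounding behavior, worked out directly from the function above:

assert scale_width(32, 1.5, 8, None) == 48   # 48 is already a multiple of 8
assert scale_width(32, 1.1, 8, None) == 32   # 35.2 snaps down to 32, which is
                                             # within 10% of 35.2, so no bump
assert scale_width(32, None, 8, None) == 32  # falsy coefficient: unchanged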
Calculates the scaled number of repeats based on the depth coefficient.
def scale_depth(num_repeats, depth_coefficient): """ Calculates the scaled number of repeats based on the depth coefficient. """ if not depth_coefficient: return num_repeats return int(math.ceil(depth_coefficient * num_repeats))
Returns the required padding for "same" style convolutions
def get_same_padding_for_kernel_size(kernel_size): """ Returns the required padding for "same" style convolutions """ if kernel_size % 2 == 0: raise ValueError(f"Only odd sized kernels are supported, got {kernel_size}") return (kernel_size - 1) // 2
Implementation of `R(2+1)D unit <https://arxiv.org/abs/1711.11248>`_. Decompose one 3D conv into one 2D spatial conv and one 1D temporal conv. Choose the middle dimensionality so that the total number of parameters in the 2D spatial conv and the 1D temporal conv is unchanged. Args: dim_in (int): the channel dimensions of the input. dim_out (int): the channel dimension of the output. temporal_stride (int): the temporal stride of the bottleneck. spatial_stride (int): the spatial_stride of the bottleneck. groups (int): number of groups for the convolution. inplace_relu (bool): calculate the relu on the original input without allocating new memory. bn_eps (float): epsilon for batch norm. bn_mmt (float): momentum for batch norm. Note that BN momentum in PyTorch = 1 - BN momentum in Caffe2. dim_mid (Optional[int]): If not None, use the provided channel dimension for the output of the 2D spatial conv. If None, compute the output channel dimension of the 2D spatial conv so that the total number of model parameters remains unchanged.
def r2plus1_unit(
    dim_in,
    dim_out,
    temporal_stride,
    spatial_stride,
    groups,
    inplace_relu,
    bn_eps,
    bn_mmt,
    dim_mid=None,
):
    """
    Implementation of `R(2+1)D unit <https://arxiv.org/abs/1711.11248>`_.
    Decompose one 3D conv into one 2D spatial conv and one 1D temporal conv.
    Choose the middle dimensionality so that the total number of parameters
    in the 2D spatial conv and the 1D temporal conv is unchanged.

    Args:
        dim_in (int): the channel dimensions of the input.
        dim_out (int): the channel dimension of the output.
        temporal_stride (int): the temporal stride of the bottleneck.
        spatial_stride (int): the spatial_stride of the bottleneck.
        groups (int): number of groups for the convolution.
        inplace_relu (bool): calculate the relu on the original input
            without allocating new memory.
        bn_eps (float): epsilon for batch norm.
        bn_mmt (float): momentum for batch norm. Note that BN momentum in
            PyTorch = 1 - BN momentum in Caffe2.
        dim_mid (Optional[int]): If not None, use the provided channel
            dimension for the output of the 2D spatial conv. If None, compute
            the output channel dimension of the 2D spatial conv so that the
            total number of model parameters remains unchanged.
    """
    if dim_mid is None:
        dim_mid = int(dim_out * dim_in * 3 * 3 * 3 / (dim_in * 3 * 3 + dim_out * 3))
        logging.info(
            "dim_in: %d, dim_out: %d. Set dim_mid to %d" % (dim_in, dim_out, dim_mid)
        )
    # 1x3x3 group conv, BN, ReLU
    conv_middle = nn.Conv3d(
        dim_in,
        dim_mid,
        [1, 3, 3],  # kernel
        stride=[1, spatial_stride, spatial_stride],
        padding=[0, 1, 1],
        groups=groups,
        bias=False,
    )
    conv_middle_bn = nn.BatchNorm3d(dim_mid, eps=bn_eps, momentum=bn_mmt)
    conv_middle_relu = nn.ReLU(inplace=inplace_relu)
    # 3x1x1 group conv
    conv = nn.Conv3d(
        dim_mid,
        dim_out,
        [3, 1, 1],  # kernel
        stride=[temporal_stride, 1, 1],
        padding=[1, 0, 0],
        groups=groups,
        bias=False,
    )
    return nn.Sequential(conv_middle, conv_middle_bn, conv_middle_relu, conv)
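A shape sketch (illustrative; assumes torch is imported alongside nn): the 1x3x3 conv applies the spatial stride and the 3x1x1 conv applies the temporal stride.

import torch

unit = r2plus1_unit(
    dim_in=64, dim_out=128, temporal_stride=2, spatial_stride=2,
    groups=1, inplace_relu=True, bn_eps=1e-5, bn_mmt=0.1,
)
x = torch.randn(2, 64, 8, 56, 56)      # (N, C, T, H, W)
y = unit(x)
assert y.shape == (2, 128, 4, 28, 28)  # spatial halved by the 2D conv,
                                       # temporal halved by the 1D conv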
Converts a float to closest non-zero int divisible by q.
def _quantize_float(f, q): """Converts a float to closest non-zero int divisible by q.""" return int(round(f / q) * q)
Adjusts the compatibility of widths and groups, depending on the bottleneck ratio.
def _adjust_widths_groups_compatibilty(stage_widths, bottleneck_ratios, group_widths):
    """Adjusts the compatibility of widths and groups,
    depending on the bottleneck ratio."""
    # Compute all widths for the current settings
    widths = [int(w * b) for w, b in zip(stage_widths, bottleneck_ratios)]
    group_widths_min = [min(g, w_bot) for g, w_bot in zip(group_widths, widths)]

    # Compute the adjusted widths so that stage and group widths fit
    ws_bot = [_quantize_float(w_bot, g) for w_bot, g in zip(widths, group_widths_min)]
    stage_widths = [int(w_bot / b) for w_bot, b in zip(ws_bot, bottleneck_ratios)]
    return stage_widths, group_widths_min
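A hand-computed example (bottleneck ratio 1.0, requested group width 24): bottleneck widths are snapped to the nearest multiple of the effective group width by _quantize_float.

stage_ws, group_ws = _adjust_widths_groups_compatibilty(
    [64, 160], [1.0, 1.0], [24, 24]
)
assert stage_ws == [72, 168]  # 64 -> round(64/24)*24 = 72, 160 -> 168
assert group_ws == [24, 24]   # requested group widths already fit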
helper function for constructing 3x3 grouped convolution
def conv3x3(in_planes, out_planes, stride=1, groups=1): """helper function for constructing 3x3 grouped convolution""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False, )
helper function for constructing 1x1 convolution
def conv1x1(in_planes, out_planes, stride=1): """helper function for constructing 1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
Registers a :class:`ClassyModel` subclass. This decorator allows Classy Vision to instantiate a subclass of :class:`ClassyModel` from a configuration file, even if the class itself is not part of the Classy Vision framework. To use it, apply this decorator to a ClassyModel subclass, like this: .. code-block:: python @register_model('resnet') class ResidualNet(ClassyModel): ... To instantiate a model from a configuration file, see :func:`build_model`.
def register_model(name, bypass_checks=False): """Registers a :class:`ClassyModel` subclass. This decorator allows Classy Vision to instantiate a subclass of :class:`ClassyModel` from a configuration file, even if the class itself is not part of the Classy Vision framework. To use it, apply this decorator to a ClassyModel subclass, like this: .. code-block:: python @register_model('resnet') class ResidualNet(ClassyModel): ... To instantiate a model from a configuration file, see :func:`build_model`.""" def register_model_cls(cls): if not bypass_checks: if name in MODEL_REGISTRY: msg = ( "Cannot register duplicate model ({}). Already registered at \n{}\n" ) raise ValueError(msg.format(name, MODEL_REGISTRY_TB[name])) if not issubclass(cls, ClassyModel): raise ValueError( "Model ({}: {}) must extend ClassyModel".format(name, cls.__name__) ) if cls.__name__ in MODEL_CLASS_NAMES: msg = ( "Cannot register model with duplicate class name({})." + "Previously registered at \n{}\n" ) raise ValueError( msg.format(cls.__name__, MODEL_CLASS_NAMES_TB[cls.__name__]) ) tb = "".join(traceback.format_stack()) MODEL_REGISTRY[name] = cls MODEL_CLASS_NAMES.add(cls.__name__) MODEL_REGISTRY_TB[name] = tb MODEL_CLASS_NAMES_TB[cls.__name__] = tb return cls return register_model_cls
Builds a ClassyModel from a config. This assumes a 'name' key in the config which is used to determine what model class to instantiate. For instance, a config `{"name": "my_model", "foo": "bar"}` will find a class that was registered as "my_model" (see :func:`register_model`) and call .from_config on it.
def build_model(config): """Builds a ClassyModel from a config. This assumes a 'name' key in the config which is used to determine what model class to instantiate. For instance, a config `{"name": "my_model", "foo": "bar"}` will find a class that was registered as "my_model" (see :func:`register_model`) and call .from_config on it.""" assert config["name"] in MODEL_REGISTRY, f"unknown model: {config['name']}" model = MODEL_REGISTRY[config["name"]].from_config(config) if "heads" in config: heads = defaultdict(list) for head_config in config["heads"]: assert "fork_block" in head_config, "Expect fork_block in config" fork_block = head_config["fork_block"] updated_config = copy.deepcopy(head_config) del updated_config["fork_block"] head = build_head(updated_config) heads[fork_block].append(head) model.set_heads(heads) return model
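An illustrative config with heads attached (the model and head names here are hypothetical registrations); the only structural requirement from the code above is the "fork_block" key naming the block each head attaches to:

config = {
    "name": "my_model",
    "heads": [
        {
            "name": "my_head",       # must be a registered head
            "fork_block": "block3",  # block the head is attached to
            "num_classes": 1000,
        }
    ],
}
model = build_model(config)  # heads are built and attached via set_heads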
Builds a ClassyOptimizer from a config. This assumes a 'name' key in the config which is used to determine what optimizer class to instantiate. For instance, a config `{"name": "my_optimizer", "foo": "bar"}` will find a class that was registered as "my_optimizer" (see :func:`register_optimizer`) and call .from_config on it. Also builds the param schedulers passed in the config and associates them with the optimizer. The config should contain an optional "param_schedulers" key containing a dictionary of param scheduler configs, keyed by the parameter they control. Adds "num_epochs" to each of the scheduler configs and then calls :func:`build_param_scheduler` on each config in the dictionary.
def build_optimizer(config): """Builds a ClassyOptimizer from a config. This assumes a 'name' key in the config which is used to determine what optimizer class to instantiate. For instance, a config `{"name": "my_optimizer", "foo": "bar"}` will find a class that was registered as "my_optimizer" (see :func:`register_optimizer`) and call .from_config on it. Also builds the param schedulers passed in the config and associates them with the optimizer. The config should contain an optional "param_schedulers" key containing a dictionary of param scheduler configs, keyed by the parameter they control. Adds "num_epochs" to each of the scheduler configs and then calls :func:`build_param_scheduler` on each config in the dictionary. """ return OPTIMIZER_REGISTRY[config["name"]].from_config(config)
Registers a ClassyOptimizer subclass. This decorator allows Classy Vision to instantiate a subclass of ClassyOptimizer from a configuration file, even if the class itself is not part of the Classy Vision framework. To use it, apply this decorator to a ClassyOptimizer subclass, like this: .. code-block:: python @register_optimizer('my_optimizer') class MyOptimizer(ClassyOptimizer): ... To instantiate an optimizer from a configuration file, see :func:`build_optimizer`.
def register_optimizer(name, bypass_checks=False):
    """Registers a ClassyOptimizer subclass.

    This decorator allows Classy Vision to instantiate a subclass of
    ClassyOptimizer from a configuration file, even if the class itself is
    not part of the Classy Vision framework. To use it, apply this decorator
    to a ClassyOptimizer subclass, like this:

    .. code-block:: python

      @register_optimizer('my_optimizer')
      class MyOptimizer(ClassyOptimizer):
          ...

    To instantiate an optimizer from a configuration file, see
    :func:`build_optimizer`."""

    def register_optimizer_cls(cls):
        if not bypass_checks:
            if name in OPTIMIZER_REGISTRY:
                msg = "Cannot register duplicate optimizer ({}). Already registered at \n{}\n"
                raise ValueError(msg.format(name, OPTIMIZER_REGISTRY_TB[name]))
            if not issubclass(cls, ClassyOptimizer):
                raise ValueError(
                    "Optimizer ({}: {}) must extend ClassyOptimizer".format(
                        name, cls.__name__
                    )
                )
            if cls.__name__ in OPTIMIZER_CLASS_NAMES:
                msg = (
                    "Cannot register optimizer with duplicate class name ({})."
                    + "Previously registered at \n{}\n"
                )
                raise ValueError(
                    msg.format(cls.__name__, OPTIMIZER_CLASS_NAMES_TB[cls.__name__])
                )
        tb = "".join(traceback.format_stack())
        OPTIMIZER_REGISTRY[name] = cls
        OPTIMIZER_CLASS_NAMES.add(cls.__name__)
        OPTIMIZER_REGISTRY_TB[name] = tb
        OPTIMIZER_CLASS_NAMES_TB[cls.__name__] = tb
        return cls

    return register_optimizer_cls
Add back the following functionalities to the fvcore schedulers: 1. Add `from_config` classmethod that constructs the scheduler from a dict 2. Add `update_interval` attribute 3. Add the class to the scheduler registry
def _create_classy_scheduler_class(base_class, register_name, default_update_interval): """ Add back the following functionalities to the fvcore schedulers: 1. Add `from_config` classmethod that constructs the scheduler from a dict 2. Add `update_interval` attribute 3. Add the class to the scheduler registry """ def from_config(cls, config: Dict[str, Any]) -> param_scheduler.ParamScheduler: config = copy.copy(config) assert register_name == config.pop("name") update_interval = UpdateInterval.from_config(config, default_update_interval) param_names = inspect.signature(base_class).parameters.keys() # config might contain values that are not used by constructor kwargs = {p: config[p] for p in param_names if p in config} # This argument was renamed when moving to fvcore if "num_updates" in param_names and "num_epochs" in config: kwargs["num_updates"] = config["num_epochs"] scheduler = cls(**kwargs) scheduler.update_interval = update_interval return scheduler cls = type( base_class.__name__, (base_class, ClassyParamScheduler), { "from_config": classmethod(from_config), "update_interval": default_update_interval, }, ) if hasattr(base_class, "__doc__"): cls.__doc__ = base_class.__doc__.replace("num_updates", "num_epochs") register_param_scheduler(register_name)(cls) return cls
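A hedged sketch of wrapping an fvcore scheduler; it assumes fvcore's ConstantParamScheduler (whose constructor takes a single `value` argument) and that the name "constant" is not already taken in the scheduler registry:

from fvcore.common.param_scheduler import ConstantParamScheduler

ConstantScheduler = _create_classy_scheduler_class(
    ConstantParamScheduler, "constant", UpdateInterval.EPOCH
)
scheduler = build_param_scheduler({"name": "constant", "value": 0.1})
assert scheduler(0.5) == 0.1  # same value at any point in training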
Builds a :class:`ParamScheduler` from a config. This assumes a 'name' key in the config which is used to determine what param scheduler class to instantiate. For instance, a config `{"name": "my_scheduler", "foo": "bar"}` will find a class that was registered as "my_scheduler" (see :func:`register_param_scheduler`) and call .from_config on it.
def build_param_scheduler(config: Dict[str, Any]) -> ParamScheduler: """Builds a :class:`ParamScheduler` from a config. This assumes a 'name' key in the config which is used to determine what param scheduler class to instantiate. For instance, a config `{"name": "my_scheduler", "foo": "bar"}` will find a class that was registered as "my_scheduler" (see :func:`register_param_scheduler`) and call .from_config on it.""" return PARAM_SCHEDULER_REGISTRY[config["name"]].from_config(config)
Registers a :class:`ParamScheduler` subclass. This decorator allows Classy Vision to instantiate a subclass of ParamScheduler from a configuration file, even if the class itself is not part of the Classy Vision framework. To use it, apply this decorator to a ParamScheduler subclass that implements a `from_config` classmethod, like this: .. code-block:: python @register_param_scheduler('my_scheduler') class MyParamScheduler(ParamScheduler): ... To instantiate a param scheduler from a configuration file, see :func:`build_param_scheduler`.
def register_param_scheduler(name, bypass_checks=False): """Registers a :class:`ParamScheduler` subclass. This decorator allows Classy Vision to instantiate a subclass of ParamScheduler from a configuration file, even if the class itself is not part of the Classy Vision framework. To use it, apply this decorator to a ParamScheduler subclass that implements a `from_config` classmethod, like this: .. code-block:: python @register_param_scheduler('my_scheduler') class MyParamScheduler(ParamScheduler): ... To instantiate a param scheduler from a configuration file, see :func:`build_param_scheduler`.""" def register_param_scheduler_cls(cls): if not bypass_checks: if name in PARAM_SCHEDULER_REGISTRY: msg = "Cannot register duplicate param scheduler ({}). Already registered at \n{}\n" raise ValueError(msg.format(name, PARAM_SCHEDULER_REGISTRY_TB[name])) if not issubclass(cls, ParamScheduler): raise ValueError( "Param Scheduler ({}: {}) must extend ParamScheduler".format( name, cls.__name__ ) ) tb = "".join(traceback.format_stack()) PARAM_SCHEDULER_REGISTRY[name] = cls PARAM_SCHEDULER_REGISTRY_TB[name] = tb return cls return register_param_scheduler_cls
Builds a ClassyTask from a config. This assumes a 'name' key in the config which is used to determine what task class to instantiate. For instance, a config `{"name": "my_task", "foo": "bar"}` will find a class that was registered as "my_task" (see :func:`register_task`) and call .from_config on it.
def build_task(config): """Builds a ClassyTask from a config. This assumes a 'name' key in the config which is used to determine what task class to instantiate. For instance, a config `{"name": "my_task", "foo": "bar"}` will find a class that was registered as "my_task" (see :func:`register_task`) and call .from_config on it.""" task = TASK_REGISTRY[config["name"]].from_config(config) return task
Registers a ClassyTask subclass. This decorator allows Classy Vision to instantiate a subclass of ClassyTask from a configuration file, even if the class itself is not part of the Classy Vision framework. To use it, apply this decorator to a ClassyTask subclass, like this: .. code-block:: python @register_task('my_task') class MyTask(ClassyTask): ... To instantiate a task from a configuration file, see :func:`build_task`.
def register_task(name): """Registers a ClassyTask subclass. This decorator allows Classy Vision to instantiate a subclass of ClassyTask from a configuration file, even if the class itself is not part of the Classy Vision framework. To use it, apply this decorator to a ClassyTask subclass, like this: .. code-block:: python @register_task('my_task') class MyTask(ClassyTask): ... To instantiate a task from a configuration file, see :func:`build_task`.""" def register_task_cls(cls): if name in TASK_REGISTRY: msg = "Cannot register duplicate task ({}). Already registered at \n{}\n" raise ValueError(msg.format(name, TASK_REGISTRY_TB[name])) if not issubclass(cls, ClassyTask): raise ValueError( "Task ({}: {}) must extend ClassyTask".format(name, cls.__name__) ) if cls.__name__ in TASK_CLASS_NAMES: msg = ( "Cannot register task with duplicate class name({})." + "Previously registered at \n{}\n" ) raise ValueError( msg.format(cls.__name__, TASK_CLASS_NAMES_TB[cls.__name__]) ) tb = "".join(traceback.format_stack()) TASK_REGISTRY[name] = cls TASK_CLASS_NAMES.add(cls.__name__) TASK_REGISTRY_TB[name] = tb TASK_CLASS_NAMES_TB[cls.__name__] = tb return cls return register_task_cls
Function sets up default environment variables for distributed training. Args: use_gpu: If true, set NCCL environment for GPUs
def _init_env_vars(use_gpu: bool): """Function sets up default environment variables for distributed training. Args: use_gpu: If true, set NCCL environment for GPUs """ if "WORLD_SIZE" not in os.environ or "RANK" not in os.environ: os.environ["WORLD_SIZE"] = "1" os.environ["RANK"] = "0" os.environ["LOCAL_RANK"] = "0" if "MASTER_ADDR" not in os.environ or "MASTER_PORT" not in os.environ: os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "29500" if use_gpu: # From https://github.com/pytorch/elastic/blob/4175e9ec3ac346b89dab13eeca00e8f00b6daa8f/examples/imagenet/main.py#L156 # noqa B950 # when using NCCL, on failures, surviving nodes will deadlock on NCCL ops # because NCCL uses a spin-lock on the device. Set this env var to enable a # watchdog thread that will destroy stale NCCL communicators, and # asynchronously handle NCCL errors and timed out collectives. os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
Performs distributed setup for DDP. Requires the script to be started with the torch.distributed.launch script and uses environment variables for node discovery. Args: use_gpu: If true, use distributed GPU training, else use CPU
def _init_distributed(use_gpu: bool):
    """Performs distributed setup for DDP.

    Requires the script to be started with the torch.distributed.launch
    script and uses environment variables for node discovery.

    Args:
        use_gpu: If true, use distributed GPU training, else use CPU
    """
    distributed_world_size = int(os.environ["WORLD_SIZE"])
    distributed_rank = int(os.environ["RANK"])
    backend = "nccl" if use_gpu else "gloo"
    torch.distributed.init_process_group(
        backend=backend,
        init_method="env://",
        world_size=distributed_world_size,
        rank=distributed_rank,
    )
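A minimal single-process sketch tying the two helpers together; the env-var defaults from _init_env_vars make this work even when the script is not launched through torch.distributed.launch:

use_gpu = torch.cuda.is_available()
_init_env_vars(use_gpu)
_init_distributed(use_gpu)
print(torch.distributed.get_rank(), torch.distributed.get_world_size())  # 0 1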
Generate HTML tutorials for captum Docusaurus site from Jupyter notebooks. Also create ipynb and py versions of tutorial in Docusaurus site for download.
def gen_tutorials(repo_dir: str) -> None: """Generate HTML tutorials for captum Docusaurus site from Jupyter notebooks. Also create ipynb and py versions of tutorial in Docusaurus site for download. """ with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile: tutorial_config = json.loads(infile.read()) tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v} for tid in tutorial_ids: print("Generating {} tutorial".format(tid)) # convert notebook to HTML ipynb_in_path = os.path.join(repo_dir, "tutorials", "{}.ipynb".format(tid)) with open(ipynb_in_path, "r") as infile: nb_str = infile.read() nb = nbformat.reads(nb_str, nbformat.NO_CONVERT) # displayname is absent from notebook metadata nb["metadata"]["kernelspec"]["display_name"] = "python3" exporter = HTMLExporter() html, meta = exporter.from_notebook_node(nb) # pull out html div for notebook soup = BeautifulSoup(html, "html.parser") nb_meat = soup.find("body", {"class": "jp-Notebook"}) nb_meat.attrs["class"] = ["notebook"] html_out = JS_SCRIPTS + str(nb_meat) # generate html file html_out_path = os.path.join( repo_dir, "website", "_tutorials", "{}.html".format(tid) ) with open(html_out_path, "w") as html_outfile: html_outfile.write(html_out) # generate JS file script = TEMPLATE.format(tid) js_out_path = os.path.join( repo_dir, "website", "pages", "tutorials", "{}.js".format(tid) ) with open(js_out_path, "w") as js_outfile: js_outfile.write(script) # output tutorial in both ipynb & py form ipynb_out_path = os.path.join( repo_dir, "website", "static", "files", "{}.ipynb".format(tid) ) with open(ipynb_out_path, "w") as ipynb_outfile: ipynb_outfile.write(nb_str) exporter = ScriptExporter() script, meta = exporter.from_notebook_node(nb) py_out_path = os.path.join( repo_dir, "website", "static", "files", "{}.py".format(tid) ) with open(py_out_path, "w") as py_outfile: py_outfile.write(script)
This helper function takes a sample, makes a copy, applies the provided transform to the appropriate key in the copied sample and returns the copy. It's solely to help make sure the copying / random seed happens correctly throughout the file. It is useful for constructing the expected sample field in the transform checks.
def _apply_transform_to_key_and_copy(sample, transform, key, seed=0): """ This helper function takes a sample, makes a copy, applies the provided transform to the appropriate key in the copied sample and returns the copy. It's solely to help make sure the copying / random seed happens correctly throughout the file. It is useful for constructing the expected sample field in the transform checks. """ expected_sample = copy.deepcopy(sample) torch.manual_seed(seed) numpy.random.seed(seed) random.seed(seed) is_tuple = False if isinstance(expected_sample, tuple): expected_sample = list(expected_sample) is_tuple = True expected_sample[key] = transform(expected_sample[key]) return tuple(expected_sample) if is_tuple else expected_sample
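An illustrative use in a transform test (the sample layout and the doubling transform are hypothetical); the deep copy means the original sample is left untouched:

import torch

sample = {"input": torch.randn(3, 8, 8), "target": 0}
transform = lambda t: t * 2
expected = _apply_transform_to_key_and_copy(sample, transform, "input")
assert torch.equal(expected["input"], sample["input"] * 2)  # transformed copy
assert expected["target"] == sample["target"]               # other keys untouched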
Parses the csv file and returns number of rows
def parse_csv(file_path): """Parses the csv file and returns number of rows""" num_rows = 0 with open(file_path, "r", newline="") as csvfile: reader = csv.DictReader(csvfile, delimiter="\t") for _ in reader: num_rows += 1 return num_rows
Find the full path for a given block name e.g. block3-1 --> 3.block3-1
def _find_block_full_path(model, block_name): """Find the full path for a given block name e.g. block3-1 --> 3.block3-1 """ for name, _ in model.named_modules(): if name.endswith(block_name): return name return None
Allow for parallelism in CircleCI for speedier tests.
def _circleci_parallelism(suite):
    """Allow for parallelism in CircleCI for speedier tests."""
    if int(os.environ.get("CIRCLE_NODE_TOTAL", 0)) <= 1:
        # either not running on circleci, or we're not using parallelism.
        return suite
    # tests are automatically sorted by discover, so we will get the same
    # ordering on all hosts.
    total = int(os.environ["CIRCLE_NODE_TOTAL"])
    index = int(os.environ["CIRCLE_NODE_INDEX"])

    # right now each test corresponds to a file. Certain files are slower
    # than others, so we flatten the suite before sharding.
    tests = [testfile._tests for testfile in suite._tests]
    tests = list(chain.from_iterable(tests))
    random.Random(42).shuffle(tests)
    tests = [t for i, t in enumerate(tests) if i % total == index]
    return unittest.TestSuite(tests)
Short tests. Runs on CircleCI on every commit. Returns everything in the tests root directory.
def unittests(): """ Short tests. Runs on CircleCI on every commit. Returns everything in the tests root directory. """ test_loader = unittest.TestLoader() test_suite = test_loader.discover("test", pattern="*_test.py") test_suite = _circleci_parallelism(test_suite) return test_suite
Decorator that can be used to skip GPU tests on non-GPU machines.
def skip_if_no_gpu(func): """Decorator that can be used to skip GPU tests on non-GPU machines.""" func.skip_if_no_gpu = True @wraps(func) def wrapper(*args, **kwargs): if not torch.cuda.is_available(): return if torch.cuda.device_count() <= 0: return return func(*args, **kwargs) return wrapper
Decorator that can be used to repeat a test multiple times.
def repeat_test(original_function=None, *, num_times=3):
    """Decorator that can be used to repeat a test multiple times."""

    def repeat_test_decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for _ in range(num_times):
                func(*args, **kwargs)

        return wrapper

    # this handles default arguments to decorator:
    if original_function:
        return repeat_test_decorator(original_function)
    return repeat_test_decorator
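Both decorator forms are supported, as the trailing `original_function` handling shows (test names here are illustrative):

@repeat_test
def test_default(self):
    ...  # runs num_times=3 times

@repeat_test(num_times=10)
def test_flaky(self):
    ...  # runs 10 times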
Makes Torch code run deterministically.
def make_torch_deterministic(seed=0): """Makes Torch code run deterministically.""" torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) os.environ["MKL_NUM_THREADS"] = "1" os.environ["OMP_NUM_THREADS"] = "1"
Compare two batches. Does not do recursive comparison
def compare_batches(test_fixture, batch1, batch2): """Compare two batches. Does not do recursive comparison""" test_fixture.assertEqual(type(batch1), type(batch2)) if isinstance(batch1, (tuple, list)): test_fixture.assertEqual(len(batch1), len(batch2)) for n in range(len(batch1)): value1 = batch1[n] value2 = batch2[n] test_fixture.assertEqual(type(value1), type(value2)) if torch.is_tensor(value1): test_fixture.assertTrue(torch.allclose(value1, value2)) else: test_fixture.assertEqual(value1, value2) elif isinstance(batch1, dict): test_fixture.assertEqual(batch1.keys(), batch2.keys()) for key, value1 in batch1.items(): value2 = batch2[key] test_fixture.assertEqual(type(value1), type(value2)) if torch.is_tensor(value1): test_fixture.assertTrue(torch.allclose(value1, value2)) else: test_fixture.assertEqual(value1, value2)
Compares a list of batches and the dataset. If some samples were skipped in the iterator (i.e. if we simulated an error on that sample), that should be indicated in the skip_indices list
def compare_batchlist_and_dataset_with_skips( test_fixture, batch_list, dataset, skip_indices=None ): """ Compares a list of batches and the dataset. If some samples were skipped in the iterator (i.e. if we simulated an error on that sample), that should be indicated in the skip_indices list """ if skip_indices is None: skip_indices = [] if isinstance(skip_indices, int): skip_indices = [skip_indices] skips = 0 for idx, batch in enumerate(batch_list): while (idx + skips) in skip_indices: skips += 1 dataset_batch = dataset[idx + skips] compare_batches(test_fixture, batch, dataset_batch)
Takes a batch of samples, e.g. batch = {'input': tensor of shape [256, 3, 224, 224], 'target': tensor of shape [256]} and unpacks it into a list of single samples, e.g. [{'input': tensor of shape [3, 224, 224], 'target': scalar tensor} ... ]
def recursive_unpack(batch):
    """
    Takes a batch of samples, e.g.

    batch = {'input': tensor of shape [256, 3, 224, 224],
             'target': tensor of shape [256]}

    and unpacks it into a list of single samples, e.g.

    [{'input': tensor of shape [3, 224, 224], 'target': scalar tensor} ... ]
    """
    new_list = []
    if isinstance(batch, dict):
        unpacked_dict = {}
        batchsize_per_replica = -1
        for key, val in batch.items():
            unpacked_dict[key] = recursive_unpack(val)
            batchsize_per_replica = (
                len(unpacked_dict[key])
                if not torch.is_tensor(unpacked_dict[key])
                else 1
            )
        for idx in range(batchsize_per_replica):
            sample = {}
            for key, val in unpacked_dict.items():
                sample[key] = val[idx]

            new_list.append(sample)
        return new_list
    elif isinstance(batch, (list, tuple)):
        unpacked_list = []
        if isinstance(batch, tuple):
            batch = list(batch)

        for val in batch:
            unpacked_list.append(recursive_unpack(val))
        batchsize_per_replica = (
            len(unpacked_list[0]) if not torch.is_tensor(unpacked_list[0]) else 1
        )
        for idx in range(batchsize_per_replica):
            sample = []
            for val in unpacked_list:
                sample.append(val[idx])
            if isinstance(batch, tuple):
                sample = tuple(sample)
            new_list.append(sample)
        return new_list
    elif torch.is_tensor(batch):
        for i in range(batch.size()[0]):
            new_list.append(batch[i])
        return new_list
    raise TypeError("Unexpected type %s passed to unpack" % type(batch))
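An illustrative round trip for a dict batch of size 2 (assumes torch is imported); note that indexing a tensor strips the batch dimension:

import torch

batch = {"input": torch.randn(2, 3, 4, 4), "target": torch.tensor([0, 1])}
samples = recursive_unpack(batch)
assert len(samples) == 2
assert samples[0]["input"].shape == (3, 4, 4)  # batch dim is stripped
assert samples[1]["target"].item() == 1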
Tests the classy state dicts for equality, but skips the member objects which implement their own {get, set}_classy_state functions.
def compare_states(test_fixture, state_1, state_2, check_heads=True): """ Tests the classy state dicts for equality, but skips the member objects which implement their own {get, set}_classy_state functions. """ # check base_model compare_model_state( test_fixture, state_1["base_model"], state_2["base_model"], check_heads ) # check losses test_fixture.assertEqual(len(state_1["losses"]), len(state_2["losses"])) for loss_1, loss_2 in zip(state_1["losses"], state_2["losses"]): test_fixture.assertAlmostEqual(loss_1, loss_2) for key in ["base_model", "meters", "optimizer", "losses"]: # we trust that these have been tested using their unit tests or # by the code above test_fixture.assertIn(key, state_1) test_fixture.assertIn(key, state_2) del state_1[key] del state_2[key] test_fixture.assertDictEqual(state_1, state_2)
>>> contributors = {'Alice': new_person(github='alice', twitter='alice')} >>> merge_all_the_people('2.6.0', contributors, {}, {}) >>> contributors {'Alice': {'committed': [], 'reported': [], 'github': 'alice', 'twitter': 'alice'}} >>> contributors = {'Bob': new_person(github='bob', twitter='bob')} >>> merge_all_the_people('2.6.0', contributors, {'Bob'}, {'bob'}) >>> contributors {'Bob': {'committed': ['2.6.0'], 'reported': ['2.6.0'], 'github': 'bob', 'twitter': 'bob'}} >>> contributors = {'Charlotte': new_person(github='charlotte', twitter='charlotte', committed=['2.5.0'], reported=['2.5.0'])} >>> merge_all_the_people('2.6.0', contributors, {'Charlotte'}, {'charlotte'}) >>> contributors {'Charlotte': {'committed': ['2.5.0', '2.6.0'], 'reported': ['2.5.0', '2.6.0'], 'github': 'charlotte', 'twitter': 'charlotte'}}
def merge_all_the_people(release: str, contributors: People, committers: FullNames, reporters: GitHubLogins) -> None: """ >>> contributors = {'Alice': new_person(github='alice', twitter='alice')} >>> merge_all_the_people('2.6.0', contributors, {}, {}) >>> contributors {'Alice': {'committed': [], 'reported': [], 'github': 'alice', 'twitter': 'alice'}} >>> contributors = {'Bob': new_person(github='bob', twitter='bob')} >>> merge_all_the_people('2.6.0', contributors, {'Bob'}, {'bob'}) >>> contributors {'Bob': {'committed': ['2.6.0'], 'reported': ['2.6.0'], 'github': 'bob', 'twitter': 'bob'}} >>> contributors = {'Charlotte': new_person(github='charlotte', twitter='charlotte', committed=['2.5.0'], reported=['2.5.0'])} >>> merge_all_the_people('2.6.0', contributors, {'Charlotte'}, {'charlotte'}) >>> contributors {'Charlotte': {'committed': ['2.5.0', '2.6.0'], 'reported': ['2.5.0', '2.6.0'], 'github': 'charlotte', 'twitter': 'charlotte'}} """ # Update known contributors. for name, details in contributors.items(): if name in committers: if release not in details['committed']: details['committed'].append(release) committers.remove(name) if details['github'] in reporters: if release not in details['reported']: details['reported'].append(release) reporters.remove(details['github']) # Add new committers. for name in committers: user_info = user(fullname=name) contributors[name] = new_person( github=user_info['login'], twitter=user_info['twitter_username'], committed=[release], ) if user_info['login'] in reporters: contributors[name]['reported'].append(release) reporters.remove(user_info['login']) # Add new reporters. for github_username in reporters: user_info = user(github_username=github_username) contributors[user_info['name'] or user_info['login']] = new_person( github=github_username, twitter=user_info['twitter_username'], reported=[release], )