Delete scene_id from all directories.
Args:
directories: directories to check
scene_id: scene to delete | def delete_scene(directories: list[str], scene_id: str) -> None:
"""Delete scene_id from all directories.
Args:
directories: directories to check
scene_id: scene to delete
"""
print(f"Removing {scene_id}")
for directory in directories:
scene = os.path.join(directory, scene_id)
if os.path.exists(scene):
shutil.rmtree(scene) |
Retrieve CONUS MultiPolygon.
Args:
download_root: directory in which to store the USA shapefile
Returns:
MultiPolygon of CONUS | def retrieve_rois_polygons(download_root: str) -> MultiPolygon:
"""Retrieve CONUS MultiPolygon.
Args:
download_root: directory in which to store the USA shapefile
Returns:
MultiPolygon of CONUS
"""
state_url = "https://www2.census.gov/geo/tiger/GENZ2018/shp/cb_2018_us_state_5m.zip"
state_filename = "cb_2018_us_state_5m.shp"
download_and_extract_archive(state_url, download_root)
excluded_states = [
"United States Virgin Islands",
"Commonwealth of the Northern Mariana Islands",
"Puerto Rico",
"Alaska",
"Hawaii",
"American Samoa",
"Guam",
]
conus = []
with fiona.open(os.path.join(download_root, state_filename), "r") as shapefile:
for feature in shapefile:
name = feature["properties"]["NAME"]
if name in excluded_states:
continue
else:
conus.append(shape(feature["geometry"]))
conus = unary_union(conus)
return conus |
Retrieve the mask for a given landsat image.
Args:
img_src: input image for which to find a corresponding chip
mask_src: CRS aligned mask from which to retrieve a chip
corresponding to img_src
Returns:
mask array | def retrieve_mask_chip(
img_src: DatasetReader, mask_src: DatasetReader
) -> "np.typing.NDArray[np.uint8]":
"""Retrieve the mask for a given landsat image.
Args:
img_src: input image for which to find a corresponding chip
mask_src: CRS aligned mask from which to retrieve a chip
corresponding to img_src
Returns:
mask array
"""
out_shape = (1, *img_src.shape)
mask_chip: "np.typing.NDArray[np.uint8]" = mask_src.read(
out_shape=out_shape, window=from_bounds(*img_src.bounds, mask_src.transform)
)
# Copy nodata pixels from image to mask (Landsat 7 ETM+ SLC-off only)
if "LE07" in img_src.files[0]:
img_chip = img_src.read(1)
mask_chip[0][img_chip == 0] = 0
return mask_chip |
Set up the argument parser.
Returns:
the argument parser | def set_up_parser() -> argparse.ArgumentParser:
"""Set up the argument parser.
Returns:
the argument parser
"""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--landsat-root",
default=os.path.join("data", "landsat"),
help="directory containing Landsat data",
metavar="ROOT",
)
parser.add_argument(
"--cdl-root",
default=os.path.join("data", "cdl"),
help="directory containing CDL data",
metavar="ROOT",
)
parser.add_argument(
"-d", "--device", default=0, type=int, help="CPU/GPU ID to use", metavar="ID"
)
parser.add_argument(
"-c",
"--cache",
action="store_true",
help="cache file handles during data loading",
)
parser.add_argument(
"-b",
"--batch-size",
default=2**4,
type=int,
help="number of samples in each mini-batch",
metavar="SIZE",
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-n",
"--num-batches",
type=int,
help="number of batches to load",
metavar="SIZE",
)
group.add_argument(
"-e",
"--epoch-size",
type=int,
help="number of samples to load, should be evenly divisible by batch size",
metavar="SIZE",
)
parser.add_argument(
"-p",
"--patch-size",
default=224,
type=int,
help="height/width of each patch in pixels",
metavar="PIXELS",
)
parser.add_argument(
"-s",
"--stride",
default=112,
type=int,
help="sampling stride for GridGeoSampler in pixels",
metavar="PIXELS",
)
parser.add_argument(
"-w",
"--num-workers",
default=0,
type=int,
help="number of workers for parallel data loading",
metavar="NUM",
)
parser.add_argument(
"--seed", default=0, type=int, help="random seed for reproducibility"
)
parser.add_argument(
"--output-fn",
default="benchmark-results.csv",
type=str,
help="path to the CSV file to write results",
metavar="FILE",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="print results to stdout"
)
return parser |
High-level pipeline.
Benchmarks performance of various samplers with and without caching.
Args:
args: command-line arguments | def main(args: argparse.Namespace) -> None:
"""High-level pipeline.
Benchmarks performance of various samplers with and without caching.
Args:
args: command-line arguments
"""
bands = ["B1", "B2", "B3", "B4", "B5", "B6", "B7"]
# Benchmark samplers
# Initialize datasets
cdl = CDL(args.cdl_root, cache=args.cache)
landsat = Landsat8(
args.landsat_root, crs=cdl.crs, res=cdl.res, cache=args.cache, bands=bands
)
dataset = landsat & cdl
# Initialize samplers
if args.epoch_size:
length = args.epoch_size
num_batches = args.epoch_size // args.batch_size
elif args.num_batches:
length = args.num_batches * args.batch_size
num_batches = args.num_batches
samplers = [
RandomGeoSampler(landsat, size=args.patch_size, length=length),
GridGeoSampler(landsat, size=args.patch_size, stride=args.stride),
RandomBatchGeoSampler(
landsat, size=args.patch_size, batch_size=args.batch_size, length=length
),
]
results_rows = []
for sampler in samplers:
if args.verbose:
print(f"\n{sampler.__class__.__name__}:")
if isinstance(sampler, RandomBatchGeoSampler):
dataloader = DataLoader(
dataset,
batch_sampler=sampler,
num_workers=args.num_workers,
collate_fn=stack_samples,
)
else:
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
sampler=sampler,
num_workers=args.num_workers,
collate_fn=stack_samples,
)
tic = time.time()
num_total_patches = 0
for i, batch in enumerate(dataloader):
num_total_patches += args.batch_size
# This is to stop the GridGeoSampler from enumerating everything
if i == num_batches - 1:
break
toc = time.time()
duration = toc - tic
if args.verbose:
print(f" duration: {duration:.3f} sec")
print(f" count: {num_total_patches} patches")
print(f" rate: {num_total_patches / duration:.3f} patches/sec")
if args.cache:
if args.verbose:
print(landsat._cached_load_warp_file.cache_info())
# Clear cache for fair comparison between samplers
# Both `landsat` and `cdl` share the same cache
landsat._cached_load_warp_file.cache_clear()
results_rows.append(
{
"cached": args.cache,
"seed": args.seed,
"duration": duration,
"count": num_total_patches,
"rate": num_total_patches / duration,
"sampler": sampler.__class__.__name__,
"batch_size": args.batch_size,
"num_workers": args.num_workers,
}
)
# Benchmark model
model = resnet34()
# Change number of input channels to match Landsat
model.conv1 = nn.Conv2d(
len(bands), 64, kernel_size=7, stride=2, padding=3, bias=False
)
criterion = nn.CrossEntropyLoss()
params = model.parameters()
optimizer = optim.SGD(params, lr=0.0001)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu", args.device)
model = model.to(device)
tic = time.time()
num_total_patches = 0
for _ in range(num_batches):
num_total_patches += args.batch_size
x = torch.rand(args.batch_size, len(bands), args.patch_size, args.patch_size)
# y = torch.randint(0, 256, (args.batch_size, args.patch_size, args.patch_size))
y = torch.randint(0, 256, (args.batch_size,))
x = x.to(device)
y = y.to(device)
optimizer.zero_grad()
prediction = model(x)
loss = criterion(prediction, y)
loss.backward()
optimizer.step()
toc = time.time()
duration = toc - tic
if args.verbose:
print("\nResNet-34:")
print(f" duration: {duration:.3f} sec")
print(f" count: {num_total_patches} patches")
print(f" rate: {num_total_patches / duration:.3f} patches/sec")
results_rows.append(
{
"cached": args.cache,
"seed": args.seed,
"duration": duration,
"count": num_total_patches,
"rate": num_total_patches / duration,
"sampler": "ResNet-34",
"batch_size": args.batch_size,
"num_workers": args.num_workers,
}
)
fieldnames = [
"cached",
"seed",
"duration",
"count",
"rate",
"sampler",
"batch_size",
"num_workers",
]
if not os.path.exists(args.output_fn):
with open(args.output_fn, "w") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
with open(args.output_fn, "a") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerows(results_rows) |
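For completeness, a minimal sketch of the script entry point that would tie set_up_parser and main together; the exact seeding calls are an assumption, not shown in this excerpt.
if __name__ == "__main__":
    parser = set_up_parser()
    args = parser.parse_args()
    torch.manual_seed(args.seed)  # assumed use of the --seed argument
    main(args)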
Recursive defaultdict.
Returns:
a nested dictionary | def nested_dict() -> defaultdict[str, defaultdict]: # type: ignore[type-arg]
"""Recursive defaultdict.
Returns:
a nested dictionary
"""
return defaultdict(nested_dict) |
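A quick illustration of the recursive defaultdict (keys and value are made up): nested levels are created on first access, so results can be filled in without initializing intermediate dicts.
results = nested_dict()
results["resnet18"]["lr=1e-3"]["seed=0"] = 0.87  # intermediate dicts appear automatically
print(results["resnet18"]["lr=1e-3"]["seed=0"])  # 0.87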
Set up the argument parser.
Returns:
the argument parser | def set_up_parser() -> argparse.ArgumentParser:
"""Set up the argument parser.
Returns:
the argument parser
"""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--input-dir",
required=True,
type=str,
help="directory containing the experiment run directories",
metavar="ROOT",
)
parser.add_argument(
"--chesapeakecvpr-root",
required=True,
type=str,
help="directory containing the ChesapeakeCVPR dataset",
metavar="ROOT",
)
parser.add_argument(
"--output-fn",
default="chesapeakecvpr-results.csv",
type=str,
help="path to the CSV file to write results",
metavar="FILE",
)
parser.add_argument(
"-d",
"--device",
default=0,
type=int,
help="GPU ID to use, ignored if no GPUs are available",
metavar="ID",
)
return parser |
High-level pipeline.
Args:
args: command-line arguments | def main(args: argparse.Namespace) -> None:
"""High-level pipeline.
Args:
args: command-line arguments
"""
if os.path.exists(args.output_fn):
print(f"The output file {args.output_fn} already exists, exiting...")
return
# Set up the result file
fieldnames = [
"train-state",
"model",
"learning-rate",
"initialization",
"loss",
"test-state",
"acc",
"iou",
]
with open(args.output_fn, "w") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
# Test loop
trainer = Trainer(
accelerator="auto",
devices=[args.device],
logger=False,
enable_progress_bar=False,
enable_checkpointing=False,
)
for experiment_dir in os.listdir(args.input_dir):
checkpoint_fn = None
for fn in os.listdir(os.path.join(args.input_dir, experiment_dir)):
if fn.startswith("epoch") and fn.endswith(".ckpt"):
checkpoint_fn = fn
break
if checkpoint_fn is None:
print(
f"Skipping {os.path.join(args.input_dir, experiment_dir)} as we are not"
+ " able to find a checkpoint file"
)
continue
checkpoint_fn = os.path.join(args.input_dir, experiment_dir, checkpoint_fn)
try:
model = SemanticSegmentationTask.load_from_checkpoint(checkpoint_fn)
model.freeze()
model.eval()
except KeyError:
print(
f"Skipping {experiment_dir} as we are not able to load a valid"
+ f" SemanticSegmentationTask from {checkpoint_fn}"
)
continue
try:
experiment_dir_parts = experiment_dir.split("_")
train_state = experiment_dir_parts[0]
model_name = experiment_dir_parts[1]
learning_rate = experiment_dir_parts[2]
loss = experiment_dir_parts[3]
initialization = "random" if len(experiment_dir_parts) == 5 else "imagenet"
except IndexError:
print(
f"Skipping {experiment_dir} as the directory name is not in the"
+ " expected format"
)
continue
# Test the loaded model against the test set from all states
for test_splits in ALL_TEST_SPLITS:
dm = ChesapeakeCVPRDataModule(
root=args.chesapeakecvpr_root,
train_splits=["de-train"],
val_splits=["de-val"],
test_splits=test_splits,
batch_size=32,
num_workers=8,
class_set=5,
)
results = trainer.test(model=model, datamodule=dm, verbose=False)
print(experiment_dir, test_splits, results[0])
row = {
"train-state": train_state,
"model": model_name,
"learning-rate": learning_rate,
"initialization": initialization,
"loss": loss,
"test-state": "_".join(test_splits),
"acc": results[0]["test_Accuracy"],
"iou": results[0]["test_IoU"],
}
with open(args.output_fn, "a") as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow(row) |
Worker run in a separate process for each GPU ID in GPUS; pulls experiment commands from the shared queue until it is empty. | def do_work(work: "Queue[str]", gpu_idx: int) -> bool:
"""Worker run for each GPU ID in GPUS: pull experiment commands from the queue until it is empty."""
while not work.empty():
experiment = work.get()
experiment = experiment.replace("GPU", str(gpu_idx))
print(experiment)
if not DRY_RUN:
subprocess.call(experiment.split(" "))
return True |
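A hedged sketch of how do_work might be driven: one process per GPU ID pulls command strings off a shared queue. GPUS, EXPERIMENTS, and DRY_RUN are assumed module-level constants in the surrounding script; the command below is a placeholder, not a real experiment.
from multiprocessing import Process, Queue

GPUS = [0, 1]  # assumption: GPU IDs defined by the surrounding script
EXPERIMENTS = ["echo running-on-GPU"]  # placeholder commands; "GPU" is replaced per worker

if __name__ == "__main__":
    work: "Queue[str]" = Queue()
    for experiment in EXPERIMENTS:
        work.put(experiment)
    processes = [Process(target=do_work, args=(work, gpu_idx)) for gpu_idx in GPUS]
    for p in processes:
        p.start()
    for p in processes:
        p.join()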
Create test data for the AgriFieldNet dataset.
Args:
paths: path to store test data | def generate_test_data(paths: str) -> None:
"""Create test data for the AgriFieldNet dataset.
Args:
paths: path to store test data
"""
dtype = np.uint8
dtype_max = np.iinfo(dtype).max
SIZE = 32
np.random.seed(0)
bands = (
"B01",
"B02",
"B03",
"B04",
"B05",
"B06",
"B07",
"B08",
"B8A",
"B09",
"B11",
"B12",
)
profile = {
"dtype": dtype,
"width": SIZE,
"height": SIZE,
"count": 1,
"crs": CRS.from_epsg(32644),
"transform": Affine(10.0, 0.0, 535840.0, 0.0, -10.0, 3079680.0),
}
source_dir = os.path.join(paths, "source")
train_mask_dir = os.path.join(paths, "train_labels")
test_field_dir = os.path.join(paths, "test_labels")
os.makedirs(source_dir, exist_ok=True)
os.makedirs(train_mask_dir, exist_ok=True)
os.makedirs(test_field_dir, exist_ok=True)
source_unique_folder_ids = ["32407", "8641e", "a419f", "eac11", "ff450"]
train_folder_ids = source_unique_folder_ids[0:5]
test_folder_ids = source_unique_folder_ids[3:5]
for id in source_unique_folder_ids:
directory = os.path.join(
source_dir, "ref_agrifieldnet_competition_v1_source_" + id
)
os.makedirs(directory, exist_ok=True)
for band in bands:
train_arr = np.random.randint(dtype_max, size=(SIZE, SIZE), dtype=dtype)
path = os.path.join(
directory, f"ref_agrifieldnet_competition_v1_source_{id}_{band}_10m.tif"
)
with rasterio.open(path, "w", **profile) as src:
src.write(train_arr, 1)
for id in train_folder_ids:
train_mask_arr = np.random.randint(size=(SIZE, SIZE), low=0, high=6)
path = os.path.join(
train_mask_dir, f"ref_agrifieldnet_competition_v1_labels_train_{id}.tif"
)
with rasterio.open(path, "w", **profile) as src:
src.write(train_mask_arr, 1)
train_field_arr = np.random.randint(20, size=(SIZE, SIZE), dtype=np.uint16)
path = os.path.join(
train_mask_dir,
f"ref_agrifieldnet_competition_v1_labels_train_{id}_field_ids.tif",
)
with rasterio.open(path, "w", **profile) as src:
src.write(train_field_arr, 1)
for id in test_folder_ids:
test_field_arr = np.random.randint(10, 30, size=(SIZE, SIZE), dtype=np.uint16)
path = os.path.join(
test_field_dir,
f"ref_agrifieldnet_competition_v1_labels_test_{id}_field_ids.tif",
)
with rasterio.open(path, "w", **profile) as src:
src.write(test_field_arr, 1) |
Create S1 or S2 data with num channels.
Args:
path: path where to save tif
num_channels: number of channels (4 for S1, 11 for S2)
dtype: uint16 for image data and float32 for target | def create_tif_file(path: str, num_channels: int, dtype: str) -> None:
"""Create S1 or S2 data with num channels.
Args:
path: path where to save tif
num_channels: number of channels (4 for S1, 11 for S2)
dtype: uint16 for image data and float32 for target
"""
profile = {}
profile["driver"] = "GTiff"
profile["dtype"] = dtype
profile["count"] = num_channels
profile["crs"] = "epsg:4326"
profile["transform"] = rasterio.transform.from_bounds(0, 0, 1, 1, 1, 1)
profile["height"] = SIZE
profile["width"] = SIZE
profile["compress"] = "lzw"
profile["predictor"] = 2
if "float" in profile["dtype"]:
Z = np.random.randn(SIZE, SIZE).astype(profile["dtype"])
else:
Z = np.random.randint(
np.iinfo(profile["dtype"]).max, size=(SIZE, SIZE), dtype=profile["dtype"]
)
with rasterio.open(path, "w", **profile) as src:
for i in range(1, profile["count"] + 1):
src.write(Z, i) |
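Illustrative calls (file names are made up; channel counts follow the docstring above):
create_tif_file("s1_sample.tif", num_channels=4, dtype="uint16")       # S1-style image
create_tif_file("target_sample.tif", num_channels=1, dtype="float32")  # float32 target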
Create test data archive for DeepGlobeLandCover dataset.
Args:
root: path to store test data
n_samples: number of samples.
Returns:
md5 hash of created archive | def generate_test_data(root: str, n_samples: int = 3) -> str:
"""Create test data archive for DeepGlobeLandCover dataset.
Args:
root: path to store test data
n_samples: number of samples.
Returns:
md5 hash of created archive
"""
dtype = np.uint8
size = 2
folder_path = os.path.join(root, "data")
train_img_dir = os.path.join(folder_path, "data", "training_data", "images")
train_mask_dir = os.path.join(folder_path, "data", "training_data", "masks")
test_img_dir = os.path.join(folder_path, "data", "test_data", "images")
test_mask_dir = os.path.join(folder_path, "data", "test_data", "masks")
os.makedirs(train_img_dir, exist_ok=True)
os.makedirs(train_mask_dir, exist_ok=True)
os.makedirs(test_img_dir, exist_ok=True)
os.makedirs(test_mask_dir, exist_ok=True)
train_ids = [1, 2, 3]
test_ids = [8, 9, 10]
for i in range(n_samples):
train_id = train_ids[i]
test_id = test_ids[i]
dtype_max = np.iinfo(dtype).max
train_arr = np.random.randint(dtype_max, size=(size, size, 3), dtype=dtype)
train_img = Image.fromarray(train_arr)
train_img.save(os.path.join(train_img_dir, str(train_id) + "_sat.jpg"))
test_arr = np.random.randint(dtype_max, size=(size, size, 3), dtype=dtype)
test_img = Image.fromarray(test_arr)
test_img.save(os.path.join(test_img_dir, str(test_id) + "_sat.jpg"))
train_mask_arr = np.full((size, size, 3), (0, 255, 255), dtype=dtype)
train_mask_img = Image.fromarray(train_mask_arr)
train_mask_img.save(os.path.join(train_mask_dir, str(train_id) + "_mask.png"))
test_mask_arr = np.full((size, size, 3), (255, 0, 255), dtype=dtype)
test_mask_img = Image.fromarray(test_mask_arr)
test_mask_img.save(os.path.join(test_mask_dir, str(test_id) + "_mask.png"))
# Create archive
shutil.make_archive(folder_path, "zip", folder_path)
shutil.rmtree(folder_path)
return calculate_md5(f"{folder_path}.zip") |
Creates test data archive for the EnviroAtlas dataset and returns its md5 hash.
Args:
root (str): Path to store test data
Returns:
str: md5 hash of created archive | def generate_test_data(root: str) -> str:
"""Creates test data archive for the EnviroAtlas dataset and returns its md5 hash.
Args:
root (str): Path to store test data
Returns:
str: md5 hash of created archive
"""
size = (64, 64)
folder_path = os.path.join(root, "enviroatlas_lotp")
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for prefix in tile_list:
for suffix, data_profile in layer_data_profiles.items():
img_path = os.path.join(folder_path, f"{prefix}_{suffix}.tif")
img_dir = os.path.dirname(img_path)
if not os.path.exists(img_dir):
os.makedirs(img_dir)
data_profile["profile"]["height"] = size[0]
data_profile["profile"]["width"] = size[1]
data_profile["profile"]["transform"] = Affine(
1.0, 0.0, 608170.0, 0.0, -1.0, 3381430.0
)
write_data(
img_path,
data_profile["profile"],
data_profile["data_type"],
data_profile["vals"],
)
# build the spatial index
schema = {
"geometry": "Polygon",
"properties": {
"split": "str",
"naip": "str",
"nlcd": "str",
"roads": "str",
"water": "str",
"waterways": "str",
"waterbodies": "str",
"buildings": "str",
"lc": "str",
"prior_no_osm_no_buildings": "str",
"prior": "str",
},
}
with fiona.open(
os.path.join(folder_path, "spatial_index.geojson"),
"w",
driver="GeoJSON",
crs="EPSG:3857",
schema=schema,
) as dst:
for prefix in tile_list:
img_path = os.path.join(folder_path, f"{prefix}_a_naip.tif")
with rasterio.open(img_path) as f:
geom = shapely.geometry.mapping(shapely.geometry.box(*f.bounds))
geom = fiona.transform.transform_geom(
f.crs.to_string(), "EPSG:3857", geom
)
row = {
"geometry": geom,
"properties": {
"split": prefix.split("/")[0].replace("_tiles-debuffered", "")
},
}
for suffix, data_profile in layer_data_profiles.items():
key = suffix_to_key_map[suffix]
row["properties"][key] = f"{prefix}_{suffix}.tif"
dst.write(row)
# Create archive
archive_path = os.path.join(root, "enviroatlas_lotp")
shutil.make_archive(archive_path, "zip", root_dir=root, base_dir="enviroatlas_lotp")
shutil.rmtree(folder_path)
md5: str = calculate_md5(archive_path + ".zip")
return md5 |
Creates test data archive for InriaAerialImageLabeling dataset and
returns its md5 hash.
Args:
root (str): Path to store test data
n_samples (int, optional): Number of samples. Defaults to 2.
Returns:
str: md5 hash of created archive | def generate_test_data(root: str, n_samples: int = 2) -> str:
"""Creates test data archive for InriaAerialImageLabeling dataset and
returns its md5 hash.
Args:
root (str): Path to store test data
n_samples (int, optional): Number of samples. Defaults to 2.
Returns:
str: md5 hash of created archive
"""
dtype = np.dtype("uint8")
size = (8, 8)
driver = "GTiff"
transform = Affine(0.3, 0.0, 616500.0, 0.0, -0.3, 3345000.0)
crs = CRS.from_epsg(26914)
folder_path = os.path.join(root, "AerialImageDataset")
img_dir = os.path.join(folder_path, "train", "images")
lbl_dir = os.path.join(folder_path, "train", "gt")
timg_dir = os.path.join(folder_path, "test", "images")
if not os.path.exists(img_dir):
os.makedirs(img_dir)
if not os.path.exists(lbl_dir):
os.makedirs(lbl_dir)
if not os.path.exists(timg_dir):
os.makedirs(timg_dir)
for i in range(n_samples):
dtype_max = np.iinfo(dtype).max
img = np.random.randint(dtype_max, size=size, dtype=dtype)
lbl = np.random.randint(2, size=size, dtype=dtype)
timg = np.random.randint(dtype_max, size=size, dtype=dtype)
img_path = os.path.join(img_dir, f"austin{i+1}.tif")
lbl_path = os.path.join(lbl_dir, f"austin{i+1}.tif")
timg_path = os.path.join(timg_dir, f"austin{i+10}.tif")
write_data(img_path, img, driver, crs, transform)
write_data(lbl_path, lbl, driver, crs, transform)
write_data(timg_path, timg, driver, crs, transform)
# Create archive
archive_path = os.path.join(root, "NEW2-AerialImageDataset")
shutil.make_archive(
archive_path, "zip", root_dir=root, base_dir="AerialImageDataset"
)
return calculate_md5(f"{archive_path}.zip") |
Create the testing file. | def create_file(path: str, dtype: str) -> None:
"""Create the testing file."""
profile = {
"driver": "GTiff",
"dtype": dtype,
"count": 1,
"crs": CRS.from_epsg(32616),
"transform": Affine(10, 0.0, 399960.0, 0.0, -10, 4500000.0),
"height": SIZE,
"width": SIZE,
"compress": "lzw",
"predictor": 2,
}
allowed_values = [0, 1, 2, 3, 15]
Z = np.random.choice(allowed_values, size=(SIZE, SIZE))
with rasterio.open(path, "w", **profile) as src:
src.write(Z, 1) |
Create the testing file. | def create_file(path: str, dtype: str) -> None:
"""Create the testing file."""
profile = {
"driver": "GTiff",
"dtype": dtype,
"count": 1,
"crs": CRS.from_wkt(wkt),
"transform": Affine(30.0, 0.0, -2493045.0, 0.0, -30.0, 3310005.0),
"height": SIZE,
"width": SIZE,
"compress": "lzw",
"predictor": 2,
}
allowed_values = [0, 11, 12, 21, 22, 23, 24, 31, 41, 42, 43, 52, 71, 81, 82, 90, 95]
Z = np.random.choice(allowed_values, size=(SIZE, SIZE))
with rasterio.open(path, "w", **profile) as src:
src.write(Z, 1) |
Write a raster file.
Args:
res: Resolution.
epsg: EPSG of file.
dtype: Data type.
path: File path. | def write_raster(
res: int = RES[0],
epsg: int = EPSG[0],
dtype: str = "uint8",
path: str | None = None,
) -> None:
"""Write a raster file.
Args:
res: Resolution.
epsg: EPSG of file.
dtype: Data type.
path: File path.
"""
size = SIZE // res
profile = {
"driver": "GTiff",
"dtype": dtype,
"count": 1,
"crs": f"epsg:{epsg}",
"transform": from_bounds(0, 0, SIZE, SIZE, size, size),
"height": size,
"width": size,
"nodata": 0,
}
if path is None:
name = f"res_{res}_epsg_{epsg}"
path = os.path.join(name, f"{name}.tif")
directory = os.path.dirname(path)
os.makedirs(directory, exist_ok=True)
with rio.open(path, "w", **profile) as f:
x = np.ones((1, size, size))
f.write(x) |
Reproject a raster file.
Args:
res: Resolution.
src_epsg: EPSG of source file.
dst_epsg: EPSG of destination file. | def reproject_raster(res: int, src_epsg: int, dst_epsg: int) -> None:
"""Reproject a raster file.
Args:
res: Resolution.
src_epsg: EPSG of source file.
dst_epsg: EPSG of destination file.
"""
src_name = f"res_{res}_epsg_{src_epsg}"
src_path = os.path.join(src_name, f"{src_name}.tif")
with rio.open(src_path) as src:
dst_crs = f"epsg:{dst_epsg}"
transform, width, height = calculate_default_transform(
src.crs, dst_crs, src.width, src.height, *src.bounds
)
profile = src.profile.copy()
profile.update(
{"crs": dst_crs, "transform": transform, "width": width, "height": height}
)
dst_name = f"res_{res}_epsg_{dst_epsg}"
os.makedirs(dst_name, exist_ok=True)
dst_path = os.path.join(dst_name, f"{dst_name}.tif")
with rio.open(dst_path, "w", **profile) as dst:
reproject(
source=rio.band(src, 1),
destination=rio.band(dst, 1),
src_transform=src.transform,
src_crs=src.crs,
dst_transform=dst.transform,
dst_crs=dst.crs,
) |
Takes a path to an asset based on type and returns the class label
overview object
Args:
label_type: LabelType - the type of label, either RASTER or VECTOR
asset_path: str - path to the asset to read in either a raster image or
geojson vector
Returns:
overview: LabelOverview - the STAC LabelOverview object containing label classes | def get_item_class_overview(label_type: LabelType, asset_path: str) -> LabelOverview:
"""Takes a path to an asset based on type and returns the class label
overview object
Args:
label_type: LabelType - the type of label, either RASTER or VECTOR
asset_path: str - path to the asset to read in either a raster image or
geojson vector
Returns:
overview: LabelOverview - the STAC LabelOverview object containing label classes
"""
count_list = []
img_arr = rasterio.open(asset_path).read()
value_count = np.unique(img_arr.flatten(), return_counts=True)
for ix, classy in enumerate(value_count[0]):
if classy > 0:
label_count = LabelCount.create(
name=CLASS_COUNT_MAP[str(int(classy))], count=int(value_count[1][ix])
)
count_list.append(label_count)
overview = LabelOverview(properties={})
overview.apply(property_key="labels", counts=count_list)
return overview |
Create test data for the SouthAfricaCropType dataset. | def generate_test_data() -> None:
"""Create test data for the SouthAfricaCropType dataset."""
paths = "south_africa_crop_type"
dtype = np.uint8
dtype_max = np.iinfo(dtype).max
SIZE = 256
np.random.seed(0)
s1_bands = ("VH", "VV")
s2_bands = (
"B01",
"B02",
"B03",
"B04",
"B05",
"B06",
"B07",
"B08",
"B8A",
"B09",
"B11",
"B12",
)
profile = {
"dtype": dtype,
"width": SIZE,
"height": SIZE,
"count": 1,
"crs": CRS.from_epsg(32634),
"transform": Affine(10.0, 0.0, 535840.0, 0.0, -10.0, 3079680.0),
}
train_imagery_s1_dir = os.path.join(paths, "train", "imagery", "s1")
train_imagery_s2_dir = os.path.join(paths, "train", "imagery", "s2")
train_labels_dir = os.path.join(paths, "train", "labels")
os.makedirs(train_imagery_s1_dir, exist_ok=True)
os.makedirs(train_imagery_s2_dir, exist_ok=True)
os.makedirs(train_labels_dir, exist_ok=True)
train_field_ids = ["12", "66"]
s1_timestamps = ["2017_04_01", "2017_07_28"]
s2_timestamps = ["2017_05_04", "2017_07_22"]
def write_raster(path: str, arr: np.ndarray) -> None:
with rasterio.open(path, "w", **profile) as src:
src.write(arr, 1)
for field_id in train_field_ids:
for date in s1_timestamps:
s1_dir = os.path.join(train_imagery_s1_dir, field_id, date)
os.makedirs(s1_dir, exist_ok=True)
for band in s1_bands:
train_arr = np.random.randint(dtype_max, size=(SIZE, SIZE), dtype=dtype)
path = os.path.join(s1_dir, f"{field_id}_{date}_{band}_10m.tif")
write_raster(path, train_arr)
for date in s2_timestamps:
s2_dir = os.path.join(train_imagery_s2_dir, field_id, date)
os.makedirs(s2_dir, exist_ok=True)
for band in s2_bands:
train_arr = np.random.randint(dtype_max, size=(SIZE, SIZE), dtype=dtype)
path = os.path.join(s2_dir, f"{field_id}_{date}_{band}_10m.tif")
write_raster(path, train_arr)
label_path = os.path.join(train_labels_dir, f"{field_id}.tif")
label_arr = np.random.randint(9, size=(SIZE, SIZE), dtype=dtype)
write_raster(label_path, label_arr) |
Create the testing file. | def create_file(path: str, dtype: str) -> None:
"""Create the testing file."""
profile = {
"driver": "GTiff",
"dtype": dtype,
"count": 1,
"crs": CRS.from_epsg(32616),
"transform": Affine(10, 0.0, 399960.0, 0.0, -10, 4500000.0),
"height": SIZE,
"width": SIZE,
"compress": "lzw",
"predictor": 2,
}
allowed_values = [0, 1]
Z = np.random.choice(allowed_values, size=(SIZE, SIZE))
with rasterio.open(path, "w", **profile) as src:
src.write(Z, 1) |
Create test image
Args:
img_dir (str): Name of image directory
imgs (List[str]): List of images to be created
Returns:
List[List[float]]: Boundary coordinates | def create_test_image(img_dir: str, imgs: list[str]) -> list[list[float]]:
"""Create test image
Args:
img_dir (str): Name of image directory
imgs (List[str]): List of images to be created
Returns:
List[List[float]]: Boundary coordinates
"""
for img in imgs:
imgpath = os.path.join(img_dir, img)
Z = np.arange(4, dtype="uint16").reshape(2, 2)
count = img_count[img]
with rasterio.open(
imgpath,
"w",
driver="GTiff",
height=Z.shape[0],
width=Z.shape[1],
count=count,
dtype=Z.dtype,
crs=crs,
transform=transform,
) as dst:
for i in range(1, dst.count + 1):
dst.write(Z, i)
tim = rasterio.open(imgpath)
slice_index = [[1, 1], [1, 2], [2, 2], [2, 1], [1, 1]]
return [list(tim.transform * p) for p in slice_index] |
Create test label
Args:
lbldir (str): Name of label directory
lblname (str): Name of label file
coords (List[Tuple[float, float]]): Boundary coordinates
det_type (str): Type of dataset. Must be either buildings or roads.
empty (bool, optional): Creates empty label file if True. Defaults to False.
diff_crs (bool, optional): Assigns EPSG:3857 as CRS instead of
default EPSG:4326. Defaults to False. | def create_test_label(
lbldir: str,
lblname: str,
coords: list[list[float]],
det_type: str,
empty: bool = False,
diff_crs: bool = False,
) -> None:
"""Create test label
Args:
lbldir (str): Name of label directory
lblname (str): Name of label file
coords (List[Tuple[float, float]]): Boundary coordinates
det_type (str): Type of dataset. Must be either buildings or roads.
empty (bool, optional): Creates empty label file if True. Defaults to False.
diff_crs (bool, optional): Assigns EPSG:3857 as CRS instead of
default EPSG:4326. Defaults to False.
"""
if empty:
# Creates a new file
with open(os.path.join(lbldir, lblname), "w"):
pass
return
if det_type == "buildings":
meta_properties = OrderedDict()
geom = "Polygon"
rec = {
"type": "Feature",
"id": "0",
"properties": OrderedDict(),
"geometry": {"type": "Polygon", "coordinates": [coords]},
}
else:
meta_properties = OrderedDict(
[
("heading", "str"),
("lane_number", "str"),
("one_way_ty", "str"),
("paved", "str"),
("road_id", "int"),
("road_type", "str"),
("origarea", "int"),
("origlen", "float"),
("partialDec", "int"),
("truncated", "int"),
("bridge_type", "str"),
("inferred_speed_mph", "float"),
("inferred_speed_mps", "float"),
]
)
geom = "LineString"
dummy_vals = {"str": "a", "float": 45.0, "int": 0}
ROAD_DICT = [(k, dummy_vals[v]) for k, v in meta_properties.items()]
rec = {
"type": "Feature",
"id": "0",
"properties": OrderedDict(ROAD_DICT),
"geometry": {"type": "LineString", "coordinates": [coords[0], coords[2]]},
}
meta = {
"driver": "GeoJSON",
"schema": {"properties": meta_properties, "geometry": geom},
"crs": {"init": "epsg:4326"},
}
if diff_crs:
meta["crs"] = {"init": "epsg:3857"}
out_file = os.path.join(lbldir, lblname)
with fiona.open(out_file, "w", **meta) as dst:
dst.write(rec) |
Command-line interface to TorchGeo. | def main(args: ArgsType = None) -> None:
"""Command-line interface to TorchGeo."""
# Taken from https://github.com/pangeo-data/cog-best-practices
rasterio_best_practices = {
"GDAL_DISABLE_READDIR_ON_OPEN": "EMPTY_DIR",
"AWS_NO_SIGN_REQUEST": "YES",
"GDAL_MAX_RAW_BLOCK_CACHE_SIZE": "200000000",
"GDAL_SWATH_SIZE": "200000000",
"VSI_CURL_CACHE_SIZE": "200000000",
}
os.environ.update(rasterio_best_practices)
LightningCLI(
model_class=BaseTask,
datamodule_class=BaseDataModule,
seed_everything_default=0,
subclass_mode_model=True,
subclass_mode_data=True,
save_config_kwargs={"overwrite": True},
args=args,
) |
Custom object detection collate fn to handle variable boxes.
Args:
batch: list of sample dicts returned by the dataset
Returns:
batch dict output
.. versionadded:: 0.5 | def collate_fn(batch: list[dict[str, Tensor]]) -> dict[str, Any]:
"""Custom object detection collate fn to handle variable boxes.
Args:
batch: list of sample dicts returned by the dataset
Returns:
batch dict output
.. versionadded:: 0.5
"""
output: dict[str, Any] = {}
output["image"] = torch.stack([sample["image"] for sample in batch])
if "boxes" in batch[0]:
output["boxes"] = [sample["boxes"] for sample in batch]
if "label" in batch[0]:
output["label"] = [sample["label"] for sample in batch]
return output |
Custom collate fn for object detection and instance segmentation.
Args:
batch: list of sample dicts returned by the dataset
Returns:
batch dict output
.. versionadded:: 0.6 | def collate_fn_detection(batch: list[dict[str, Tensor]]) -> dict[str, Any]:
"""Custom collate fn for object detection and instance segmentation.
Args:
batch: list of sample dicts returned by the dataset
Returns:
batch dict output
.. versionadded:: 0.6
"""
output: dict[str, Any] = {}
output["image"] = [sample["image"] for sample in batch]
output["boxes"] = [sample["boxes"].float() for sample in batch]
if "labels" in batch[0]:
output["labels"] = [sample["labels"] for sample in batch]
else:
output["labels"] = [
torch.tensor([1] * len(sample["boxes"])) for sample in batch
]
if "masks" in batch[0]:
output["masks"] = [sample["masks"] for sample in batch]
return output |
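A hedged usage sketch: the collate function is handed to a standard PyTorch DataLoader so variable numbers of boxes (and, for the detection variant, variable image sizes) survive batching. The dataset object is assumed.
from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=4, collate_fn=collate_fn_detection)
batch = next(iter(loader))
# batch["image"], batch["boxes"], and batch["labels"] are lists with one
# entry per sample rather than stacked tensors.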
Method for performing a single group-wise shuffle split of data.
Loosely based off of :class:`sklearn.model_selection.GroupShuffleSplit`.
Args:
groups: a sequence of group values used to split. Should be in the same order as
the data you want to split.
train_size: the proportion of groups to include in the train split. If None,
then it is set to complement `test_size`.
test_size: the proportion of groups to include in the test split (rounded up).
If None, then it is set to complement `train_size`.
random_state: controls the random splits (passed a seed to a
numpy.random.Generator), set for reproducible splits.
Returns:
train_indices, test_indices
Raises:
ValueError if `train_size` and `test_size` do not sum to 1, aren't in the range
(0,1), or are both None.
ValueError if the number of training or testing groups turns out to be 0. | def group_shuffle_split(
groups: Iterable[Any],
train_size: float | None = None,
test_size: float | None = None,
random_state: int | None = None,
) -> tuple[list[int], list[int]]:
"""Method for performing a single group-wise shuffle split of data.
Loosely based off of :class:`sklearn.model_selection.GroupShuffleSplit`.
Args:
groups: a sequence of group values used to split. Should be in the same order as
the data you want to split.
train_size: the proportion of groups to include in the train split. If None,
then it is set to complement `test_size`.
test_size: the proportion of groups to include in the test split (rounded up).
If None, then it is set to complement `train_size`.
random_state: controls the random splits (passed a seed to a
numpy.random.Generator), set for reproducible splits.
Returns:
train_indices, test_indices
Raises:
ValueError if `train_size` and `test_size` do not sum to 1, aren't in the range
(0,1), or are both None.
ValueError if the number of training or testing groups turns out to be 0.
"""
if train_size is None and test_size is None:
raise ValueError("You must specify `train_size`, `test_size`, or both.")
if (train_size is not None and test_size is not None) and (
not math.isclose(train_size + test_size, 1)
):
raise ValueError("`train_size` and `test_size` must sum to 1.")
if train_size is None and test_size is not None:
train_size = 1 - test_size
if test_size is None and train_size is not None:
test_size = 1 - train_size
assert train_size is not None and test_size is not None
if train_size <= 0 or train_size >= 1 or test_size <= 0 or test_size >= 1:
raise ValueError("`train_size` and `test_size` must be in the range (0,1).")
group_vals = sorted(set(groups))
n_groups = len(group_vals)
n_test_groups = round(n_groups * test_size)
n_train_groups = n_groups - n_test_groups
if n_train_groups == 0 or n_test_groups == 0:
raise ValueError(
f"{n_groups} groups were found, however the current settings of "
+ "`train_size` and `test_size` result in 0 training or testing groups."
)
generator = np.random.default_rng(seed=random_state)
train_group_vals = set(
generator.choice(group_vals, size=n_train_groups, replace=False)
)
train_idxs = []
test_idxs = []
for i, group_val in enumerate(groups):
if group_val in train_group_vals:
train_idxs.append(i)
else:
test_idxs.append(i)
return train_idxs, test_idxs |
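A small illustrative call (values are made up); indices that share a group value always land on the same side of the split.
groups = ["a", "a", "b", "b", "c", "c"]
train_idxs, test_idxs = group_shuffle_split(groups, test_size=1 / 3, random_state=0)
# One of the three groups (two indices) ends up in test_idxs,
# e.g. train_idxs == [0, 1, 4, 5] and test_idxs == [2, 3].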
Read a PASCAL VOC annotation file.
Args:
path: path to xml file
Returns:
dict of image filename, points, and class labels | def parse_pascal_voc(path: str) -> dict[str, Any]:
"""Read a PASCAL VOC annotation file.
Args:
path: path to xml file
Returns:
dict of image filename, points, and class labels
"""
et = parse(path)
element = et.getroot()
source = cast(Element, element.find("source"))
filename = cast(Element, source.find("filename")).text
labels, points = [], []
objects = cast(Element, element.find("objects"))
for obj in objects.findall("object"):
elm_points = cast(Element, obj.find("points"))
lis_points = elm_points.findall("point")
str_points = []
for point in lis_points:
text = cast(str, point.text)
str_points.append(text.split(","))
tup_points = [(float(p1), float(p2)) for p1, p2 in str_points]
possibleresult = cast(Element, obj.find("possibleresult"))
name = cast(Element, possibleresult.find("name"))
label = name.text
labels.append(label)
points.append(tup_points)
return dict(filename=filename, points=points, labels=labels) |
Read a PASCAL VOC annotation file.
Args:
path: path to xml file
Returns:
dict of image filename, points, and class labels | def parse_pascal_voc(path: str) -> dict[str, Any]:
"""Read a PASCAL VOC annotation file.
Args:
path: path to xml file
Returns:
dict of image filename, points, and class labels
"""
et = ElementTree.parse(path)
element = et.getroot()
filename = element.find("filename").text # type: ignore[union-attr]
labels, bboxes = [], []
for obj in element.findall("object"):
bndbox = obj.find("bndbox")
bbox = [
int(bndbox.find("xmin").text), # type: ignore[union-attr, arg-type]
int(bndbox.find("ymin").text), # type: ignore[union-attr, arg-type]
int(bndbox.find("xmax").text), # type: ignore[union-attr, arg-type]
int(bndbox.find("ymax").text), # type: ignore[union-attr, arg-type]
]
label_var = obj.find("damage")
if label_var is not None:
label = label_var.text
else:
label = "other"
bboxes.append(bbox)
labels.append(label)
return dict(filename=filename, bboxes=bboxes, labels=labels) |
Disambiguate partial timestamps.
Based on :func:`torchgeo.datasets.utils.disambiguate_timestamps`.
Args:
year: year, possibly nan
month: month, possibly nan
day: day, possibly nan
Returns:
minimum and maximum possible time range | def _disambiguate_timestamps(
year: float, month: float, day: float
) -> tuple[float, float]:
"""Disambiguate partial timestamps.
Based on :func:`torchgeo.datasets.utils.disambiguate_timestamps`.
Args:
year: year, possibly nan
month: month, possibly nan
day: day, possibly nan
Returns:
minimum and maximum possible time range
"""
if np.isnan(year):
# No temporal info
return 0, sys.maxsize
elif np.isnan(month):
# Year resolution
mint = datetime(int(year), 1, 1)
maxt = datetime(int(year) + 1, 1, 1)
elif np.isnan(day):
# Month resolution
mint = datetime(int(year), int(month), 1)
if month == 12:
maxt = datetime(int(year) + 1, 1, 1)
else:
maxt = datetime(int(year), int(month) + 1, 1)
else:
# Day resolution
mint = datetime(int(year), int(month), int(day))
maxt = mint + timedelta(days=1)
maxt -= timedelta(microseconds=1)
return mint.timestamp(), maxt.timestamp() |
Utility to divide a number into a list of integers according to fractions.
Implementation based on :meth:`torch.utils.data.random_split`.
Args:
fractions: list of fractions
total: total to be divided
Returns:
List of lengths.
.. versionadded:: 0.5 | def _fractions_to_lengths(fractions: Sequence[float], total: int) -> Sequence[int]:
"""Utility to divide a number into a list of integers according to fractions.
Implementation based on :meth:`torch.utils.data.random_split`.
Args:
fractions: list of fractions
total: total to be divided
Returns:
List of lengths.
.. versionadded:: 0.5
"""
lengths = [floor(frac * total) for frac in fractions]
remainder = int(total - sum(lengths))
# Add 1 to all the lengths in round-robin fashion until the remainder is 0
for i in range(remainder):
idx_to_add_at = i % len(lengths)
lengths[idx_to_add_at] += 1
return lengths |
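A worked example of the round-robin remainder handling (numbers are my own):
# floor([0.5 * 10, 0.25 * 10, 0.25 * 10]) == [5, 2, 2]; the leftover 1 is
# handed out round-robin starting at index 0, giving [6, 2, 2].
assert list(_fractions_to_lengths([0.5, 0.25, 0.25], total=10)) == [6, 2, 2]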
Split a GeoDataset randomly assigning its index's BoundingBoxes.
This function will go through each BoundingBox in the GeoDataset's index and
randomly assign it to new GeoDatasets.
Args:
dataset: dataset to be split
lengths: lengths or fractions of splits to be produced
generator: (optional) generator used for the random permutation
Returns:
A list of the subset datasets.
.. versionadded:: 0.5 | def random_bbox_assignment(
dataset: GeoDataset,
lengths: Sequence[float],
generator: Generator | None = default_generator,
) -> list[GeoDataset]:
"""Split a GeoDataset randomly assigning its index's BoundingBoxes.
This function will go through each BoundingBox in the GeoDataset's index and
randomly assign it to new GeoDatasets.
Args:
dataset: dataset to be split
lengths: lengths or fractions of splits to be produced
generator: (optional) generator used for the random permutation
Returns:
A list of the subset datasets.
.. versionadded:: 0.5
"""
if not (isclose(sum(lengths), 1) or isclose(sum(lengths), len(dataset))):
raise ValueError(
"Sum of input lengths must equal 1 or the length of dataset's index."
)
if any(n <= 0 for n in lengths):
raise ValueError("All items in input lengths must be greater than 0.")
if isclose(sum(lengths), 1):
lengths = _fractions_to_lengths(lengths, len(dataset))
lengths = cast(Sequence[int], lengths)
hits = list(dataset.index.intersection(dataset.index.bounds, objects=True))
hits = [hits[i] for i in randperm(sum(lengths), generator=generator)]
new_indexes = [
Index(interleaved=False, properties=Property(dimension=3)) for _ in lengths
]
for i, length in enumerate(lengths):
for j in range(length):
hit = hits.pop()
new_indexes[i].insert(j, hit.bounds, hit.object)
new_datasets = []
for index in new_indexes:
ds = deepcopy(dataset)
ds.index = index
new_datasets.append(ds)
return new_datasets |
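A hedged usage sketch (construction of the GeoDataset is assumed): an 80/20 split with a fixed torch Generator for reproducibility.
from torch import Generator

generator = Generator().manual_seed(0)
train_ds, val_ds = random_bbox_assignment(dataset, lengths=[0.8, 0.2], generator=generator)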
Split a GeoDataset randomly splitting its index's BoundingBoxes.
This function will go through each BoundingBox in the GeoDataset's index,
split it in a random direction and assign the resulting BoundingBoxes to
new GeoDatasets.
Args:
dataset: dataset to be split
fractions: fractions of splits to be produced
generator: generator used for the random permutation
Returns:
A list of the subset datasets.
.. versionadded:: 0.5 | def random_bbox_splitting(
dataset: GeoDataset,
fractions: Sequence[float],
generator: Generator | None = default_generator,
) -> list[GeoDataset]:
"""Split a GeoDataset randomly splitting its index's BoundingBoxes.
This function will go through each BoundingBox in the GeoDataset's index,
split it in a random direction and assign the resulting BoundingBoxes to
new GeoDatasets.
Args:
dataset: dataset to be split
fractions: fractions of splits to be produced
generator: generator used for the random permutation
Returns:
A list of the subset datasets.
.. versionadded:: 0.5
"""
if not isclose(sum(fractions), 1):
raise ValueError("Sum of input fractions must equal 1.")
if any(n <= 0 for n in fractions):
raise ValueError("All items in input fractions must be greater than 0.")
new_indexes = [
Index(interleaved=False, properties=Property(dimension=3)) for _ in fractions
]
for i, hit in enumerate(
dataset.index.intersection(dataset.index.bounds, objects=True)
):
box = BoundingBox(*hit.bounds)
fraction_left = 1.0
# Randomly choose the split direction
horizontal, flip = randint(0, 2, (2,), generator=generator)
for j, fraction in enumerate(fractions):
if fraction_left == fraction:
# For the last fraction, no need to split again
new_box = box
elif flip:
# new_box corresponds to fraction, box is the remainder that we might
# split again in the next iteration. Each split is done according to
# fraction wrt what's left
box, new_box = box.split(
(fraction_left - fraction) / fraction_left, horizontal
)
else:
# Same as above, but without flipping
new_box, box = box.split(fraction / fraction_left, horizontal)
new_indexes[j].insert(i, tuple(new_box), hit.object)
fraction_left -= fraction
horizontal = not horizontal
new_datasets = []
for index in new_indexes:
ds = deepcopy(dataset)
ds.index = index
new_datasets.append(ds)
return new_datasets |
Overlays a grid over a GeoDataset and randomly assigns cells to new GeoDatasets.
This function will go through each BoundingBox in the GeoDataset's index, overlay
a grid over it, and randomly assign each cell to new GeoDatasets.
Args:
dataset: dataset to be split
fractions: fractions of splits to be produced
grid_size: number of rows and columns for the grid
generator: generator used for the random permutation
Returns:
A list of the subset datasets.
.. versionadded:: 0.5 | def random_grid_cell_assignment(
dataset: GeoDataset,
fractions: Sequence[float],
grid_size: int = 6,
generator: Generator | None = default_generator,
) -> list[GeoDataset]:
"""Overlays a grid over a GeoDataset and randomly assigns cells to new GeoDatasets.
This function will go through each BoundingBox in the GeoDataset's index, overlay
a grid over it, and randomly assign each cell to new GeoDatasets.
Args:
dataset: dataset to be split
fractions: fractions of splits to be produced
grid_size: number of rows and columns for the grid
generator: generator used for the random permutation
Returns:
A list of the subset datasets.
.. versionadded:: 0.5
"""
if not isclose(sum(fractions), 1):
raise ValueError("Sum of input fractions must equal 1.")
if any(n <= 0 for n in fractions):
raise ValueError("All items in input fractions must be greater than 0.")
if grid_size < 2:
raise ValueError("Input grid_size must be greater than 1.")
new_indexes = [
Index(interleaved=False, properties=Property(dimension=3)) for _ in fractions
]
lengths = _fractions_to_lengths(fractions, len(dataset) * grid_size**2)
cells = []
# Generate the grid's cells for each bbox in index
for i, hit in enumerate(
dataset.index.intersection(dataset.index.bounds, objects=True)
):
minx, maxx, miny, maxy, mint, maxt = hit.bounds
stridex = (maxx - minx) / grid_size
stridey = (maxy - miny) / grid_size
cells.extend(
[
(
(
minx + x * stridex,
minx + (x + 1) * stridex,
miny + y * stridey,
miny + (y + 1) * stridey,
mint,
maxt,
),
hit.object,
)
for x in range(grid_size)
for y in range(grid_size)
]
)
# Randomly assign cells to each new index
cells = [cells[i] for i in randperm(len(cells), generator=generator)]
for i, length in enumerate(lengths):
for j in range(length):
cell = cells.pop()
new_indexes[i].insert(j, cell[0], cell[1])
new_datasets = []
for index in new_indexes:
ds = deepcopy(dataset)
ds.index = index
new_datasets.append(ds)
return new_datasets |
Split a GeoDataset intersecting it with a ROI for each desired new GeoDataset.
Args:
dataset: dataset to be split
rois: regions of interest of splits to be produced
Returns:
A list of the subset datasets.
.. versionadded:: 0.5 | def roi_split(dataset: GeoDataset, rois: Sequence[BoundingBox]) -> list[GeoDataset]:
"""Split a GeoDataset intersecting it with a ROI for each desired new GeoDataset.
Args:
dataset: dataset to be split
rois: regions of interest of splits to be produced
Returns:
A list of the subset datasets.
.. versionadded:: 0.5
"""
new_indexes = [
Index(interleaved=False, properties=Property(dimension=3)) for _ in rois
]
for i, roi in enumerate(rois):
if any(roi.intersects(x) and (roi & x).area > 0 for x in rois[i + 1 :]):
raise ValueError("ROIs in input rois can't overlap.")
j = 0
for hit in dataset.index.intersection(tuple(roi), objects=True):
box = BoundingBox(*hit.bounds)
new_box = box & roi
if new_box.area > 0:
new_indexes[i].insert(j, tuple(new_box), hit.object)
j += 1
new_datasets = []
for index in new_indexes:
ds = deepcopy(dataset)
ds.index = index
new_datasets.append(ds)
return new_datasets |
Split a GeoDataset on its time dimension to create non-overlapping GeoDatasets.
Args:
dataset: dataset to be split
lengths: lengths, fractions or pairs of timestamps (start, end) of splits
to be produced
Returns:
A list of the subset datasets.
.. versionadded:: 0.5 | def time_series_split(
dataset: GeoDataset, lengths: Sequence[float | tuple[float, float]]
) -> list[GeoDataset]:
"""Split a GeoDataset on its time dimension to create non-overlapping GeoDatasets.
Args:
dataset: dataset to be split
lengths: lengths, fractions or pairs of timestamps (start, end) of splits
to be produced
Returns:
A list of the subset datasets.
.. versionadded:: 0.5
"""
minx, maxx, miny, maxy, mint, maxt = dataset.bounds
totalt = maxt - mint
if not all(isinstance(x, tuple) for x in lengths):
lengths = cast(Sequence[float], lengths)
if not (isclose(sum(lengths), 1) or isclose(sum(lengths), totalt)):
raise ValueError(
"Sum of input lengths must equal 1 or the dataset's time length."
)
if any(n <= 0 for n in lengths):
raise ValueError("All items in input lengths must be greater than 0.")
if isclose(sum(lengths), 1):
lengths = [totalt * f for f in lengths]
lengths = [
(mint + offset - length, mint + offset) # type: ignore[operator]
for offset, length in zip(accumulate(lengths), lengths)
]
lengths = cast(Sequence[tuple[float, float]], lengths)
new_indexes = [
Index(interleaved=False, properties=Property(dimension=3)) for _ in lengths
]
_totalt = 0.0
for i, (start, end) in enumerate(lengths):
if start >= end:
raise ValueError(
"Pairs of timestamps in lengths must have end greater than start."
)
if start < mint or end > maxt:
raise ValueError(
"Pairs of timestamps in lengths can't be out of dataset's time bounds."
)
if any(start < x < end or start < y < end for x, y in lengths[i + 1 :]):
raise ValueError("Pairs of timestamps in lengths can't overlap.")
# Remove one microsecond from each BoundingBox's maxt to avoid overlapping
offset = 0 if i == len(lengths) - 1 else 1e-6
roi = BoundingBox(minx, maxx, miny, maxy, start, end - offset)
j = 0
for hit in dataset.index.intersection(tuple(roi), objects=True):
box = BoundingBox(*hit.bounds)
new_box = box & roi
if new_box.volume > 0:
new_indexes[i].insert(j, tuple(new_box), hit.object)
j += 1
_totalt += end - start
if not isclose(_totalt, totalt):
raise ValueError(
"Pairs of timestamps in lengths must cover dataset's time bounds."
)
new_datasets = []
for index in new_indexes:
ds = deepcopy(dataset)
ds.index = index
new_datasets.append(ds)
return new_datasets |
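For comparison, illustrative calls to the splitters defined above (the dataset variable is assumed):
train, val = random_bbox_splitting(dataset, fractions=[0.8, 0.2])
train, val, test = random_grid_cell_assignment(dataset, fractions=[0.6, 0.2, 0.2], grid_size=8)
train, test = time_series_split(dataset, lengths=[0.7, 0.3])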
Extract an archive.
Args:
src: file to be extracted
dst: directory to extract to (defaults to dirname of ``src``)
Raises:
RuntimeError: if src file has unknown archival/compression scheme | def extract_archive(src: str, dst: str | None = None) -> None:
"""Extract an archive.
Args:
src: file to be extracted
dst: directory to extract to (defaults to dirname of ``src``)
Raises:
RuntimeError: if src file has unknown archival/compression scheme
"""
if dst is None:
dst = os.path.dirname(src)
suffix_and_extractor: list[tuple[str | tuple[str, ...], Any]] = [
(".rar", _rarfile.RarFile),
(
(".tar", ".tar.gz", ".tar.bz2", ".tar.xz", ".tgz", ".tbz2", ".tbz", ".txz"),
tarfile.open,
),
(".zip", _zipfile.ZipFile),
]
for suffix, extractor in suffix_and_extractor:
if src.endswith(suffix):
with extractor(src, "r") as f:
f.extractall(dst)
return
suffix_and_decompressor: list[tuple[str, Any]] = [
(".bz2", bz2.open),
(".gz", gzip.open),
(".xz", lzma.open),
]
for suffix, decompressor in suffix_and_decompressor:
if src.endswith(suffix):
dst = os.path.join(dst, os.path.basename(src).replace(suffix, ""))
with decompressor(src, "rb") as sf, open(dst, "wb") as df:
df.write(sf.read())
return
raise RuntimeError("src file has unknown archival/compression scheme") |
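Illustrative calls (paths are made up): a tarball extracted next to itself and a zip extracted into an explicit directory.
extract_archive("downloads/landsat.tar.gz")       # extracts into downloads/
extract_archive("downloads/cdl.zip", "data/cdl")  # extracts into data/cdl/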
Download and extract an archive.
Args:
url: URL to download
download_root: directory to download to
extract_root: directory to extract to (defaults to ``download_root``)
filename: download filename (defaults to basename of ``url``)
md5: checksum for download verification | def download_and_extract_archive(
url: str,
download_root: str,
extract_root: str | None = None,
filename: str | None = None,
md5: str | None = None,
) -> None:
"""Download and extract an archive.
Args:
url: URL to download
download_root: directory to download to
extract_root: directory to extract to (defaults to ``download_root``)
filename: download filename (defaults to basename of ``url``)
md5: checksum for download verification
"""
download_root = os.path.expanduser(download_root)
if extract_root is None:
extract_root = download_root
if not filename:
filename = os.path.basename(url)
download_url(url, download_root, filename, md5)
archive = os.path.join(download_root, filename)
print(f"Extracting {archive} to {extract_root}")
extract_archive(archive, extract_root) |
Download a dataset from Radiant Earth.
Args:
dataset_id: the ID of the dataset to fetch
download_root: directory to download to
api_key: the API key to use for all requests from the session. Can also be
passed in via the ``MLHUB_API_KEY`` environment variable, or configured in
``~/.mlhub/profiles``. | def download_radiant_mlhub_dataset(
dataset_id: str, download_root: str, api_key: str | None = None
) -> None:
"""Download a dataset from Radiant Earth.
Args:
dataset_id: the ID of the dataset to fetch
download_root: directory to download to
api_key: the API key to use for all requests from the session. Can also be
passed in via the ``MLHUB_API_KEY`` environment variable, or configured in
``~/.mlhub/profiles``.
"""
try:
import radiant_mlhub
except ImportError:
raise ImportError(
"radiant_mlhub is not installed and is required to download this dataset"
)
dataset = radiant_mlhub.Dataset.fetch(dataset_id, api_key=api_key)
dataset.download(output_dir=download_root, api_key=api_key) |
Download a collection from Radiant Earth.
Args:
collection_id: the ID of the collection to fetch
download_root: directory to download to
api_key: the API key to use for all requests from the session. Can also be
passed in via the ``MLHUB_API_KEY`` environment variable, or configured in
``~/.mlhub/profiles``. | def download_radiant_mlhub_collection(
collection_id: str, download_root: str, api_key: str | None = None
) -> None:
"""Download a collection from Radiant Earth.
Args:
collection_id: the ID of the collection to fetch
download_root: directory to download to
api_key: the API key to use for all requests from the session. Can also be
passed in via the ``MLHUB_API_KEY`` environment variable, or configured in
``~/.mlhub/profiles``.
"""
try:
import radiant_mlhub
except ImportError:
raise ImportError(
"radiant_mlhub is not installed and is required to download this collection"
)
collection = radiant_mlhub.Collection.fetch(collection_id, api_key=api_key)
collection.download(output_dir=download_root, api_key=api_key) |
Disambiguate partial timestamps.
TorchGeo stores the timestamp of each file in a spatiotemporal R-tree. If the full
timestamp isn't known, a file could represent a range of time. For example, in the
CDL dataset, each mask spans an entire year. This method returns the maximum
possible range of timestamps that ``date_str`` could belong to. It does this by
parsing ``format`` to determine the level of precision of ``date_str``.
Args:
date_str: string representing date and time of a data point
format: format codes accepted by :meth:`datetime.datetime.strptime`
Returns:
(mint, maxt) tuple for indexing | def disambiguate_timestamp(date_str: str, format: str) -> tuple[float, float]:
"""Disambiguate partial timestamps.
TorchGeo stores the timestamp of each file in a spatiotemporal R-tree. If the full
timestamp isn't known, a file could represent a range of time. For example, in the
CDL dataset, each mask spans an entire year. This method returns the maximum
possible range of timestamps that ``date_str`` could belong to. It does this by
parsing ``format`` to determine the level of precision of ``date_str``.
Args:
date_str: string representing date and time of a data point
format: format codes accepted by :meth:`datetime.datetime.strptime`
Returns:
(mint, maxt) tuple for indexing
"""
mint = datetime.strptime(date_str, format)
# TODO: This doesn't correctly handle literal `%%` characters in format
# TODO: May have issues with time zones, UTC vs. local time, and DST
# TODO: This is really tedious, is there a better way to do this?
if not any([f"%{c}" in format for c in "yYcxG"]):
# No temporal info
return 0, sys.maxsize
elif not any([f"%{c}" in format for c in "bBmjUWcxV"]):
# Year resolution
maxt = datetime(mint.year + 1, 1, 1)
elif not any([f"%{c}" in format for c in "aAwdjcxV"]):
# Month resolution
if mint.month == 12:
maxt = datetime(mint.year + 1, 1, 1)
else:
maxt = datetime(mint.year, mint.month + 1, 1)
elif not any([f"%{c}" in format for c in "HIcX"]):
# Day resolution
maxt = mint + timedelta(days=1)
elif not any([f"%{c}" in format for c in "McX"]):
# Hour resolution
maxt = mint + timedelta(hours=1)
elif not any([f"%{c}" in format for c in "ScX"]):
# Minute resolution
maxt = mint + timedelta(minutes=1)
elif not any([f"%{c}" in format for c in "f"]):
# Second resolution
maxt = mint + timedelta(seconds=1)
else:
# Microsecond resolution
maxt = mint + timedelta(microseconds=1)
maxt -= timedelta(microseconds=1)
return mint.timestamp(), maxt.timestamp() |
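A small illustration of the precision handling above (assumed usage; the local-time caveats noted in the TODOs apply):
from datetime import datetime

# Year-only precision: the range spans all of 2020.
mint, maxt = disambiguate_timestamp("2020", "%Y")
print(datetime.fromtimestamp(mint))  # 2020-01-01 00:00:00
print(datetime.fromtimestamp(maxt))  # 2020-12-31 23:59:59.999999

# Day precision: the range covers a single day.
mint, maxt = disambiguate_timestamp("2020-06-15", "%Y-%m-%d")
print(datetime.fromtimestamp(maxt))  # 2020-06-15 23:59:59.999999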
Context manager for changing directories.
Args:
dirname: directory to temporarily change to
create: if True, create the destination directory | def working_dir(dirname: str, create: bool = False) -> Iterator[None]:
"""Context manager for changing directories.
Args:
dirname: directory to temporarily change to
create: if True, create the destination directory
"""
if create:
os.makedirs(dirname, exist_ok=True)
cwd = os.getcwd()
os.chdir(dirname)
try:
yield
finally:
os.chdir(cwd) |
Convert a list of dictionaries to a dictionary of lists.
Args:
samples: a list of dictionaries
Returns:
a dictionary of lists
.. versionadded:: 0.2 | def _list_dict_to_dict_list(samples: Iterable[dict[Any, Any]]) -> dict[Any, list[Any]]:
"""Convert a list of dictionaries to a dictionary of lists.
Args:
samples: a list of dictionaries
Returns:
a dictionary of lists
.. versionadded:: 0.2
"""
collated = collections.defaultdict(list)
for sample in samples:
for key, value in sample.items():
collated[key].append(value)
return collated |
Convert a dictionary of lists to a list of dictionaries.
Args:
sample: a dictionary of lists
Returns:
a list of dictionaries
.. versionadded:: 0.2 | def _dict_list_to_list_dict(sample: dict[Any, Sequence[Any]]) -> list[dict[Any, Any]]:
"""Convert a dictionary of lists to a list of dictionaries.
Args:
sample: a dictionary of lists
Returns:
a list of dictionaries
.. versionadded:: 0.2
"""
uncollated: list[dict[Any, Any]] = [
{} for _ in range(max(map(len, sample.values())))
]
for key, values in sample.items():
for i, value in enumerate(values):
uncollated[i][key] = value
return uncollated |
Stack a list of samples along a new axis.
Useful for forming a mini-batch of samples to pass to
:class:`torch.utils.data.DataLoader`.
Args:
samples: list of samples
Returns:
a single sample
.. versionadded:: 0.2 | def stack_samples(samples: Iterable[dict[Any, Any]]) -> dict[Any, Any]:
"""Stack a list of samples along a new axis.
Useful for forming a mini-batch of samples to pass to
:class:`torch.utils.data.DataLoader`.
Args:
samples: list of samples
Returns:
a single sample
.. versionadded:: 0.2
"""
collated: dict[Any, Any] = _list_dict_to_dict_list(samples)
for key, value in collated.items():
if isinstance(value[0], Tensor):
collated[key] = torch.stack(value)
return collated |
Concatenate a list of samples along an existing axis.
Useful for joining samples in a :class:`torchgeo.datasets.IntersectionDataset`.
Args:
samples: list of samples
Returns:
a single sample
.. versionadded:: 0.2 | def concat_samples(samples: Iterable[dict[Any, Any]]) -> dict[Any, Any]:
"""Concatenate a list of samples along an existing axis.
Useful for joining samples in a :class:`torchgeo.datasets.IntersectionDataset`.
Args:
samples: list of samples
Returns:
a single sample
.. versionadded:: 0.2
"""
collated: dict[Any, Any] = _list_dict_to_dict_list(samples)
for key, value in collated.items():
if isinstance(value[0], Tensor):
collated[key] = torch.cat(value)
else:
collated[key] = value[0]
return collated |
Merge a list of samples.
Useful for joining samples in a :class:`torchgeo.datasets.UnionDataset`.
Args:
samples: list of samples
Returns:
a single sample
.. versionadded:: 0.2 | def merge_samples(samples: Iterable[dict[Any, Any]]) -> dict[Any, Any]:
"""Merge a list of samples.
Useful for joining samples in a :class:`torchgeo.datasets.UnionDataset`.
Args:
samples: list of samples
Returns:
a single sample
.. versionadded:: 0.2
"""
collated: dict[Any, Any] = {}
for sample in samples:
for key, value in sample.items():
if key in collated and isinstance(value, Tensor):
# Take the maximum so that nodata values (zeros) get replaced
# by data values whenever possible
collated[key] = torch.maximum(collated[key], value)
else:
collated[key] = value
return collated |
Reverse of :func:`stack_samples`.
Useful for turning a mini-batch of samples into a list of samples. These individual
samples can then be plotted using a dataset's ``plot`` method.
Args:
sample: a mini-batch of samples
Returns:
list of samples
.. versionadded:: 0.2 | def unbind_samples(sample: dict[Any, Sequence[Any]]) -> list[dict[Any, Any]]:
"""Reverse of :func:`stack_samples`.
Useful for turning a mini-batch of samples into a list of samples. These individual
samples can then be plotted using a dataset's ``plot`` method.
Args:
sample: a mini-batch of samples
Returns:
list of samples
.. versionadded:: 0.2
"""
for key, values in sample.items():
if isinstance(values, Tensor):
sample[key] = torch.unbind(values)
return _dict_list_to_list_dict(sample) |
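A round-trip sketch for stack_samples and unbind_samples; the sample keys and shapes are assumptions for illustration only.
import torch

samples = [
    {"image": torch.rand(3, 4, 4), "label": torch.tensor(0)},
    {"image": torch.rand(3, 4, 4), "label": torch.tensor(1)},
]
batch = stack_samples(samples)
print(batch["image"].shape)  # torch.Size([2, 3, 4, 4])

# unbind_samples reverses the stacking into per-sample dictionaries
recovered = unbind_samples(batch)
print(len(recovered), recovered[0]["image"].shape)  # 2 torch.Size([3, 4, 4])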
Load an image file using rasterio.
Args:
path: path to the image to be loaded
Returns:
the image | def rasterio_loader(path: str) -> np.typing.NDArray[np.int_]:
"""Load an image file using rasterio.
Args:
path: path to the image to be loaded
Returns:
the image
"""
with rasterio.open(path) as f:
array: np.typing.NDArray[np.int_] = f.read().astype(np.int32)
# NonGeoClassificationDataset expects images returned with channels last (HWC)
array = array.transpose(1, 2, 0)
return array |
Sort Sentinel-2 band files in the correct order. | def sort_sentinel2_bands(x: str) -> str:
"""Sort Sentinel-2 band files in the correct order."""
x = os.path.basename(x).split("_")[-1]
x = os.path.splitext(x)[0]
if x == "B8A":
x = "B08A"
return x |
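Used as a sort key, this orders band files so that B8A lands in its spectral position rather than after B12; the filenames below are hypothetical.
files = ["T32TNS_B8A.tif", "T32TNS_B02.tif", "T32TNS_B12.tif", "T32TNS_B03.tif"]
print(sorted(files, key=sort_sentinel2_bands))
# ['T32TNS_B02.tif', 'T32TNS_B03.tif', 'T32TNS_B8A.tif', 'T32TNS_B12.tif']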
Overlay a semantic segmentation mask onto an image.
Args:
image: tensor of shape (3, h, w) and dtype uint8
mask: tensor of shape (h, w) with pixel values representing the classes and
dtype bool
alpha: alpha blend factor
colors: list of RGB int tuples, or color strings e.g. red, #FF00FF
Returns:
a version of ``image`` overlayed with the colors given by ``mask`` and
``colors`` | def draw_semantic_segmentation_masks(
image: Tensor,
mask: Tensor,
alpha: float = 0.5,
colors: Sequence[str | tuple[int, int, int]] | None = None,
) -> np.typing.NDArray[np.uint8]:
"""Overlay a semantic segmentation mask onto an image.
Args:
image: tensor of shape (3, h, w) and dtype uint8
mask: tensor of shape (h, w) with pixel values representing the classes and
dtype bool
alpha: alpha blend factor
colors: list of RGB int tuples, or color strings e.g. red, #FF00FF
Returns:
a version of ``image`` overlayed with the colors given by ``mask`` and
``colors``
"""
classes = torch.from_numpy(np.arange(len(colors) if colors else 0, dtype=np.uint8))
class_masks = mask == classes[:, None, None]
img = draw_segmentation_masks(
image=image.byte(), masks=class_masks, alpha=alpha, colors=colors
)
img = img.permute((1, 2, 0)).numpy().astype(np.uint8)
return cast("np.typing.NDArray[np.uint8]", img) |
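A shape-level sketch; the tiny image, mask, and color names are illustrative only.
import torch

image = torch.zeros(3, 4, 4, dtype=torch.uint8)
mask = torch.tensor([[0, 0, 1, 1]] * 4)  # left half class 0, right half class 1
out = draw_semantic_segmentation_masks(image, mask, alpha=0.5, colors=["black", "red"])
print(out.shape, out.dtype)  # (4, 4, 3) uint8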
Converts an RGB colormap mask to an integer mask.
Args:
rgb: array mask coded with RGB tuples
colors: list of RGB tuples to convert to integer indices
Returns:
integer array mask | def rgb_to_mask(
rgb: np.typing.NDArray[np.uint8], colors: list[tuple[int, int, int]]
) -> np.typing.NDArray[np.uint8]:
"""Converts an RGB colormap mask to a integer mask.
Args:
rgb: array mask coded with RGB tuples
colors: list of RGB tuples to convert to integer indices
Returns:
integer array mask
"""
assert len(colors) <= 256 # we currently return a uint8 array, so the largest value
# we can map is 255
h, w = rgb.shape[:2]
mask: np.typing.NDArray[np.uint8] = np.zeros(shape=(h, w), dtype=np.uint8)
for i, c in enumerate(colors):
cmask = rgb == c
# Only update mask if class is present in mask
if isinstance(cmask, np.ndarray):
mask[cmask.all(axis=-1)] = i
return mask |
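A tiny worked example; the color list is an assumption for illustration.
import numpy as np

colors = [(0, 0, 0), (255, 0, 0)]
rgb = np.array(
    [[[0, 0, 0], [255, 0, 0]],
     [[255, 0, 0], [0, 0, 0]]],
    dtype=np.uint8,
)
print(rgb_to_mask(rgb, colors))
# [[0 1]
#  [1 0]]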
Applies percentile normalization to an input image.
Specifically, this will rescale the values in the input such that values <= the
lower percentile value will be 0 and values >= the upper percentile value will be 1.
Using the 2nd and 98th percentile usually results in good visualizations.
Args:
img: image to normalize
lower: lower percentile in range [0,100]
upper: upper percentile in range [0,100]
axis: Axis or axes along which the percentiles are computed. The default
is to compute the percentile(s) along a flattened version of the array.
Returns:
normalized version of ``img``
.. versionadded:: 0.2 | def percentile_normalization(
img: np.typing.NDArray[np.int_],
lower: float = 2,
upper: float = 98,
axis: int | Sequence[int] | None = None,
) -> np.typing.NDArray[np.int_]:
"""Applies percentile normalization to an input image.
Specifically, this will rescale the values in the input such that values <= the
lower percentile value will be 0 and values >= the upper percentile value will be 1.
Using the 2nd and 98th percentile usually results in good visualizations.
Args:
img: image to normalize
lower: lower percentile in range [0,100]
upper: upper percentile in range [0,100]
axis: Axis or axes along which the percentiles are computed. The default
is to compute the percentile(s) along a flattened version of the array.
Returns:
normalized version of ``img``
.. versionadded:: 0.2
"""
assert lower < upper
lower_percentile = np.percentile(img, lower, axis=axis)
upper_percentile = np.percentile(img, upper, axis=axis)
img_normalized: np.typing.NDArray[np.int_] = np.clip(
(img - lower_percentile) / (upper_percentile - lower_percentile + 1e-5), 0, 1
)
return img_normalized |
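A quick sanity check on synthetic data: after normalization a single extreme outlier no longer dominates the value range.
import numpy as np

img = np.concatenate([np.arange(100), [10_000]])
normed = percentile_normalization(img, lower=2, upper=98)
print(normed.min(), normed.max())  # 0.0 1.0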
Checks if the given path is pointing to a Virtual File System.
.. note::
Does not check if the path exists, or if it is a dir or file.
VSI can for instance be Cloud Storage Blobs or zip-archives.
They will start with a prefix indicating this.
For examples of these, see references for the two accepted syntaxes.
* https://gdal.org/user/virtual_file_systems.html
* https://rasterio.readthedocs.io/en/latest/topics/datasets.html
Args:
path: string representing a directory or file
Returns:
True if path is on a virtual file system, else False
.. versionadded:: 0.6 | def path_is_vsi(path: str) -> bool:
"""Checks if the given path is pointing to a Virtual File System.
.. note::
Does not check if the path exists, or if it is a dir or file.
VSI can for instance be Cloud Storage Blobs or zip-archives.
They will start with a prefix indicating this.
For examples of these, see references for the two accepted syntaxes.
* https://gdal.org/user/virtual_file_systems.html
* https://rasterio.readthedocs.io/en/latest/topics/datasets.html
Args:
path: string representing a directory or file
Returns:
True if path is on a virtual file system, else False
.. versionadded:: 0.6
"""
return "://" in path or path.startswith("/vsi") |
Converts a :class:`numpy.ndarray` to :class:`torch.Tensor`.
:func:`torch.from_numpy` rejects numpy types like uint16 that are not supported
in pytorch. This function instead casts uint16 and uint32 numpy arrays to an
appropriate pytorch type without loss of precision.
For example, a uint32 array becomes an int64 tensor. uint64 arrays will continue
to raise errors since there is no suitable torch dtype.
The returned tensor is a copy.
Args:
array: a :class:`numpy.ndarray`.
Returns:
A :class:`torch.Tensor` with the same dtype as array unless array is uint16 or
uint32, in which case an int32 or int64 Tensor is returned, respectively.
.. versionadded:: 0.6 | def array_to_tensor(array: np.typing.NDArray[Any]) -> Tensor:
"""Converts a :class:`numpy.ndarray` to :class:`torch.Tensor`.
:func:`torch.from_numpy` rejects numpy types like uint16 that are not supported
in pytorch. This function instead casts uint16 and uint32 numpy arrays to an
appropriate pytorch type without loss of precision.
For example, a uint32 array becomes an int64 tensor. uint64 arrays will continue
to raise errors since there is no suitable torch dtype.
The returned tensor is a copy.
Args:
array: a :class:`numpy.ndarray`.
Returns:
A :class:`torch.Tensor` with the same dtype as array unless array is uint16 or
uint32, in which case an int32 or int64 Tensor is returned, respectively.
.. versionadded:: 0.6
"""
if array.dtype == np.uint16:
array = array.astype(np.int32)
elif array.dtype == np.uint32:
array = array.astype(np.int64)
return torch.tensor(array) |
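For example (assumed usage), a uint16 array is widened to int32 without losing any values:
import numpy as np

arr = np.array([0, 65535], dtype=np.uint16)
t = array_to_tensor(arr)
print(t.dtype, t.tolist())  # torch.int32 [0, 65535]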
Convert coco polygons to mask tensor.
Args:
segmentations (List[int]): polygon coordinates
height (int): image height
width (int): image width
Returns:
Tensor: Mask tensor | def convert_coco_poly_to_mask(
segmentations: list[int], height: int, width: int
) -> Tensor:
"""Convert coco polygons to mask tensor.
Args:
segmentations (List[int]): polygon coordinates
height (int): image height
width (int): image width
Returns:
Tensor: Mask tensor
"""
from pycocotools import mask as coco_mask # noqa: F401
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
masks_tensor = torch.stack(masks, dim=0)
return masks_tensor |
Get an instantiated model from its name.
.. versionadded:: 0.4
Args:
name: Name of the model.
*args: Additional arguments passed to the model builder method.
**kwargs: Additional keyword arguments passed to the model builder method.
Returns:
An instantiated model. | def get_model(name: str, *args: Any, **kwargs: Any) -> nn.Module:
"""Get an instantiated model from its name.
.. versionadded:: 0.4
Args:
name: Name of the model.
*args: Additional arguments passed to the model builder method.
**kwargs: Additional keyword arguments passed to the model builder method.
Returns:
An instantiated model.
"""
model: nn.Module = _model[name](*args, **kwargs)
return model |
Get the weights enum class associated with a given model.
.. versionadded:: 0.4
Args:
name: Model builder function or the name under which it is registered.
Returns:
The weights enum class associated with the model. | def get_model_weights(name: Callable[..., nn.Module] | str) -> WeightsEnum:
"""Get the weights enum class associated with a given model.
.. versionadded:: 0.4
Args:
name: Model builder function or the name under which it is registered.
Returns:
The weights enum class associated with the model.
"""
return _model_weights[name] |
Get the weights enum value by its full name.
.. versionadded:: 0.4
Args:
name: Name of the weight enum entry.
Returns:
The requested weight enum. | def get_weight(name: str) -> WeightsEnum:
"""Get the weights enum value by its full name.
.. versionadded:: 0.4
Args:
name: Name of the weight enum entry.
Returns:
The requested weight enum.
"""
return eval(name) |
List the registered models.
.. versionadded:: 0.4
Returns:
A list of registered models. | def list_models() -> list[str]:
"""List the registered models.
.. versionadded:: 0.4
Returns:
A list of registered models.
"""
return list(_model.keys()) |
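A hedged sketch of the registry helpers above; the registry key "resnet18" and the keyword arguments are assumptions about what the registry contains and what timm accepts.
print(list_models())  # e.g. ['resnet18', 'resnet50', ...]
model = get_model("resnet18", in_chans=13, num_classes=10)  # assumed key/kwargs
weights_enum = get_model_weights("resnet18")                # lookup by registered name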
Compute the 1D sine/cosine position embedding.
Args:
embed_dim: Output dimension D for each position. Must be even.
pos: A list of positions to be encoded, of size (M,).
Returns:
Position embeddings of size (M, D).
Raises:
AssertionError: If *embed_dim* is not even. | def position_embedding(embed_dim: int, pos: Tensor) -> Tensor:
"""Compute the 1D sine/cosine position embedding.
Args:
embed_dim: Output dimension D for each position. Must be even.
pos: A list of positions to be encoded, of size (M,).
Returns:
Position embeddings of size (M, D).
Raises:
AssertionError: If *embed_dim* is not even.
"""
assert embed_dim % 2 == 0
omega = torch.arange(embed_dim // 2, dtype=torch.float32, device=pos.device)
omega /= embed_dim / 2.0
omega = 1.0 / 10000**omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = torch.einsum("m,d->md", pos, omega) # (M, D/2), outer product
emb_sin = torch.sin(out) # (M, D/2)
emb_cos = torch.cos(out) # (M, D/2)
emb = torch.cat([emb_sin, emb_cos], dim=1) # (M, D)
return emb |
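Shape check for the embedding above: M positions and an even embed_dim give an (M, D) tensor.
import torch

pos = torch.arange(4, dtype=torch.float32)
emb = position_embedding(embed_dim=8, pos=pos)
print(emb.shape)  # torch.Size([4, 8])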
Dynamic One-For-All (DOFA) small patch size 16 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2403.15356
.. versionadded:: 0.6
Args:
**kwargs: Additional keyword arguments to pass to :class:`DOFA`.
Returns:
A DOFA small 16 model. | def dofa_small_patch16_224(**kwargs: Any) -> DOFA:
"""Dynamic One-For-All (DOFA) small patch size 16 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2403.15356
.. versionadded:: 0.6
Args:
**kwargs: Additional keyword arguments to pass to :class:`DOFA`.
Returns:
A DOFA small 16 model.
"""
model = DOFA(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
return model |
Dynamic One-For-All (DOFA) base patch size 16 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2403.15356
.. versionadded:: 0.6
Args:
weights: Pre-trained model weights to use.
**kwargs: Additional keyword arguments to pass to :class:`DOFA`.
Returns:
A DOFA base 16 model. | def dofa_base_patch16_224(
weights: DOFABase16_Weights | None = None, **kwargs: Any
) -> DOFA:
"""Dynamic One-For-All (DOFA) base patch size 16 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2403.15356
.. versionadded:: 0.6
Args:
weights: Pre-trained model weights to use.
**kwargs: Additional keyword arguments to pass to :class:`DOFA`.
Returns:
A DOFA base 16 model.
"""
model = DOFA(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
if weights:
missing_keys, unexpected_keys = model.load_state_dict(
weights.get_state_dict(progress=True), strict=False
)
# Both fc_norm and head are generated dynamically
assert set(missing_keys) <= {
"fc_norm.weight",
"fc_norm.bias",
"head.weight",
"head.bias",
}
assert not unexpected_keys
return model |
Dynamic One-For-All (DOFA) large patch size 16 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2403.15356
.. versionadded:: 0.6
Args:
weights: Pre-trained model weights to use.
**kwargs: Additional keyword arguments to pass to :class:`DOFA`.
Returns:
A DOFA large 16 model. | def dofa_large_patch16_224(
weights: DOFALarge16_Weights | None = None, **kwargs: Any
) -> DOFA:
"""Dynamic One-For-All (DOFA) large patch size 16 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2403.15356
.. versionadded:: 0.6
Args:
weights: Pre-trained model weights to use.
**kwargs: Additional keyword arguments to pass to :class:`DOFA`.
Returns:
A DOFA large 16 model.
"""
model = DOFA(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
if weights:
missing_keys, unexpected_keys = model.load_state_dict(
weights.get_state_dict(progress=True), strict=False
)
# Both fc_norm and head are generated dynamically
assert set(missing_keys) <= {
"fc_norm.weight",
"fc_norm.bias",
"head.weight",
"head.bias",
}
assert not unexpected_keys
return model |
Dynamic One-For-All (DOFA) huge patch size 16 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2403.15356
.. versionadded:: 0.6
Args:
**kwargs: Additional keyword arguments to pass to :class:`DOFA`.
Returns:
A DOFA huge 16 model. | def dofa_huge_patch16_224(**kwargs: Any) -> DOFA:
"""Dynamic One-For-All (DOFA) huge patch size 16 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2403.15356
.. versionadded:: 0.6
Args:
**kwargs: Additional keyword arguments to pass to :class:`DOFA`.
Returns:
A DOFA huge 16 model.
"""
model = DOFA(patch_size=14, embed_dim=1280, depth=32, num_heads=16, **kwargs)
return model |
ResNet-18 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/pdf/1512.03385.pdf
.. versionadded:: 0.4
Args:
weights: Pre-trained model weights to use.
*args: Additional arguments to pass to :func:`timm.create_model`
**kwargs: Additional keyword arguments to pass to :func:`timm.create_model`
Returns:
A ResNet-18 model. | def resnet18(
weights: ResNet18_Weights | None = None, *args: Any, **kwargs: Any
) -> ResNet:
"""ResNet-18 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/pdf/1512.03385.pdf
.. versionadded:: 0.4
Args:
weights: Pre-trained model weights to use.
*args: Additional arguments to pass to :func:`timm.create_model`
**kwargs: Additional keyword arguments to pass to :func:`timm.create_model`
Returns:
A ResNet-18 model.
"""
if weights:
kwargs["in_chans"] = weights.meta["in_chans"]
model: ResNet = timm.create_model("resnet18", *args, **kwargs)
if weights:
missing_keys, unexpected_keys = model.load_state_dict(
weights.get_state_dict(progress=True), strict=False
)
assert set(missing_keys) <= {"fc.weight", "fc.bias"}
assert not unexpected_keys
return model |
ResNet-50 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/pdf/1512.03385.pdf
.. versionchanged:: 0.4
Switched to multi-weight support API.
Args:
weights: Pre-trained model weights to use.
*args: Additional arguments to pass to :func:`timm.create_model`.
**kwargs: Additional keyword arguments to pass to :func:`timm.create_model`.
Returns:
A ResNet-50 model. | def resnet50(
weights: ResNet50_Weights | None = None, *args: Any, **kwargs: Any
) -> ResNet:
"""ResNet-50 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/pdf/1512.03385.pdf
.. versionchanged:: 0.4
Switched to multi-weight support API.
Args:
weights: Pre-trained model weights to use.
*args: Additional arguments to pass to :func:`timm.create_model`.
**kwargs: Additional keyword arguments to pass to :func:`timm.create_model`.
Returns:
A ResNet-50 model.
"""
if weights:
kwargs["in_chans"] = weights.meta["in_chans"]
model: ResNet = timm.create_model("resnet50", *args, **kwargs)
if weights:
missing_keys, unexpected_keys = model.load_state_dict(
weights.get_state_dict(progress=True), strict=False
)
assert set(missing_keys) <= {"fc.weight", "fc.bias"}
assert not unexpected_keys
return model |
Swin Transformer v2 base model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2111.09883
.. versionadded:: 0.6
Args:
weights: Pre-trained model weights to use.
*args: Additional arguments to
pass to :class:`torchvision.models.swin_transformer.SwinTransformer`.
**kwargs: Additional keyword arguments to
pass to :class:`torchvision.models.swin_transformer.SwinTransformer`.
Returns:
A Swin Transformer Base model. | def swin_v2_b(
weights: Swin_V2_B_Weights | None = None, *args: Any, **kwargs: Any
) -> SwinTransformer:
"""Swin Transformer v2 base model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2111.09883
.. versionadded:: 0.6
Args:
weights: Pre-trained model weights to use.
*args: Additional arguments to
pass to :class:`torchvision.models.swin_transformer.SwinTransformer`.
**kwargs: Additional keyword arguments to
pass to :class:`torchvision.models.swin_transformer.SwinTransformer`.
Returns:
A Swin Transformer Base model.
"""
model: SwinTransformer = torchvision.models.swin_v2_b(weights=None, *args, **kwargs)
if weights:
model.load_state_dict(weights.get_state_dict(progress=True), strict=False)
return model |
Vision Transformer (ViT) small patch size 16 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2010.11929
.. versionadded:: 0.4
Args:
weights: Pre-trained model weights to use.
*args: Additional arguments to pass to :func:`timm.create_model`.
**kwargs: Additional keyword arguments to pass to :func:`timm.create_model`.
Returns:
A ViT small 16 model. | def vit_small_patch16_224(
weights: ViTSmall16_Weights | None = None, *args: Any, **kwargs: Any
) -> VisionTransformer:
"""Vision Transform (ViT) small patch size 16 model.
If you use this model in your research, please cite the following paper:
* https://arxiv.org/abs/2010.11929
.. versionadded:: 0.4
Args:
weights: Pre-trained model weights to use.
*args: Additional arguments to pass to :func:`timm.create_model`.
**kwargs: Additional keyword arguments to pass to :func:`timm.create_model`.
Returns:
A ViT small 16 model.
"""
if weights:
kwargs["in_chans"] = weights.meta["in_chans"]
model: VisionTransformer = timm.create_model(
"vit_small_patch16_224", *args, **kwargs
)
if weights:
missing_keys, unexpected_keys = model.load_state_dict(
weights.get_state_dict(progress=True), strict=False
)
assert set(missing_keys) <= {"head.weight", "head.bias"}
assert not unexpected_keys
return model |
Convert value to a tuple if it is not already a tuple.
Args:
value: input value
Returns:
value if value is a tuple, else (value, value) | def _to_tuple(value: tuple[float, float] | float) -> tuple[float, float]:
"""Convert value to a tuple if it is not already a tuple.
Args:
value: input value
Returns:
value if value is a tuple, else (value, value)
"""
if isinstance(value, float | int):
return (value, value)
else:
return value |
Returns a random bounding box within a given bounding box.
The ``size`` argument can either be:
* a single ``float`` - in which case the same value is used for the height and
width dimension
* a ``tuple`` of two floats - in which case, the first *float* is used for the
height dimension, and the second *float* for the width dimension
Args:
bounds: the larger bounding box to sample from
size: the size of the bounding box to sample
res: the resolution of the image
Returns:
randomly sampled bounding box from the extent of the input | def get_random_bounding_box(
bounds: BoundingBox, size: tuple[float, float] | float, res: float
) -> BoundingBox:
"""Returns a random bounding box within a given bounding box.
The ``size`` argument can either be:
* a single ``float`` - in which case the same value is used for the height and
width dimension
* a ``tuple`` of two floats - in which case, the first *float* is used for the
height dimension, and the second *float* for the width dimension
Args:
bounds: the larger bounding box to sample from
size: the size of the bounding box to sample
res: the resolution of the image
Returns:
randomly sampled bounding box from the extent of the input
"""
t_size = _to_tuple(size)
# May be negative if bounding box is smaller than patch size
width = (bounds.maxx - bounds.minx - t_size[1]) / res
height = (bounds.maxy - bounds.miny - t_size[0]) / res
minx = bounds.minx
miny = bounds.miny
# Use an integer multiple of res to avoid resampling
minx += int(torch.rand(1).item() * width) * res
miny += int(torch.rand(1).item() * height) * res
maxx = minx + t_size[1]
maxy = miny + t_size[0]
mint = bounds.mint
maxt = bounds.maxt
query = BoundingBox(minx, maxx, miny, maxy, mint, maxt)
return query |
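A usage sketch, assuming torchgeo's BoundingBox is importable as shown (fields minx, maxx, miny, maxy, mint, maxt): sample a 256 x 256 map-unit patch from a 1000 x 1000 tile at 1 m resolution.
from torchgeo.datasets import BoundingBox

tile = BoundingBox(0, 1000, 0, 1000, 0, 1)
patch = get_random_bounding_box(tile, size=256, res=1.0)
print(patch.maxx - patch.minx, patch.maxy - patch.miny)  # 256.0 256.0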
Compute number of :term:`chips <chip>` that can be sampled from a :term:`tile`.
Let :math:`i` be the size of the input tile. Let :math:`k` be the requested size of
the output patch. Let :math:`s` be the requested stride. Let :math:`o` be the number
of output chips sampled from each tile. :math:`o` can then be computed as:
.. math::
o = \left\lceil \frac{i - k}{s} \right\rceil + 1
This is almost identical to relationship 5 in
https://doi.org/10.48550/arXiv.1603.07285. However, we use ceiling instead of floor
because we want to include the final remaining chip in each row/column when bounds
is not an integer multiple of stride.
Args:
bounds: bounding box of tile
size: size of output patch
stride: stride with which to sample (defaults to ``size``)
Returns:
the number of rows/columns that can be sampled
.. versionadded:: 0.4 | def tile_to_chips(
bounds: BoundingBox,
size: tuple[float, float],
stride: tuple[float, float] | None = None,
) -> tuple[int, int]:
r"""Compute number of :term:`chips <chip>` that can be sampled from a :term:`tile`.
Let :math:`i` be the size of the input tile. Let :math:`k` be the requested size of
the output patch. Let :math:`s` be the requested stride. Let :math:`o` be the number
of output chips sampled from each tile. :math:`o` can then be computed as:
.. math::
o = \left\lceil \frac{i - k}{s} \right\rceil + 1
This is almost identical to relationship 5 in
https://doi.org/10.48550/arXiv.1603.07285. However, we use ceiling instead of floor
because we want to include the final remaining chip in each row/column when bounds
is not an integer multiple of stride.
Args:
bounds: bounding box of tile
size: size of output patch
stride: stride with which to sample (defaults to ``size``)
Returns:
the number of rows/columns that can be sampled
.. versionadded:: 0.4
"""
if stride is None:
stride = size
assert stride[0] > 0
assert stride[1] > 0
rows = math.ceil((bounds.maxy - bounds.miny - size[0]) / stride[0]) + 1
cols = math.ceil((bounds.maxx - bounds.minx - size[1]) / stride[1]) + 1
return rows, cols |
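Worked example of the formula, under the same BoundingBox assumption: a 1000-unit tile, 256-unit patches, and stride 200 give ceil((1000 - 256) / 200) + 1 = 5 chips per dimension.
from torchgeo.datasets import BoundingBox

tile = BoundingBox(0, 1000, 0, 1000, 0, 1)
print(tile_to_chips(tile, size=(256, 256), stride=(200, 200)))  # (5, 5)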
Computes the normalized mean squared error between x and y.
Args:
x: tensor x
y: tensor y
Returns:
the normalized MSE between x and y | def normalized_mse(x: Tensor, y: Tensor) -> Tensor:
"""Computes the normalized mean squared error between x and y.
Args:
x: tensor x
y: tensor y
Returns:
the normalized MSE between x and y
"""
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
mse = torch.mean(2 - 2 * (x * y).sum(dim=-1))
return mse |
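Two boundary cases: identical inputs give 0, orthogonal unit vectors give 2.
import torch

x = torch.tensor([[1.0, 0.0]])
y = torch.tensor([[0.0, 1.0]])
print(normalized_mse(x, x).item())  # 0.0
print(normalized_mse(x, y).item())  # 2.0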
Data augmentations used by MoCo.
Args:
version: Version of MoCo.
size: Size of patch to crop.
weights: Weight vector for grayscale computation.
Returns:
Data augmentation pipelines. | def moco_augmentations(
version: int, size: int, weights: Tensor
) -> tuple[nn.Module, nn.Module]:
"""Data augmentations used by MoCo.
Args:
version: Version of MoCo.
size: Size of patch to crop.
weights: Weight vector for grayscale computation.
Returns:
Data augmentation pipelines.
"""
# https://github.com/facebookresearch/moco/blob/main/main_moco.py#L326
# https://github.com/facebookresearch/moco-v3/blob/main/main_moco.py#L261
ks = size // 10 // 2 * 2 + 1
if version == 1:
# Same as InstDisc: https://arxiv.org/abs/1805.01978
aug1 = aug2 = K.AugmentationSequential(
K.RandomResizedCrop(size=(size, size), scale=(0.2, 1)),
T.RandomGrayscale(weights=weights, p=0.2),
# Not appropriate for multispectral imagery, seasonal contrast used instead
# K.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4, p=1)
K.RandomBrightness(brightness=(0.6, 1.4), p=1.0),
K.RandomContrast(contrast=(0.6, 1.4), p=1.0),
K.RandomHorizontalFlip(),
K.RandomVerticalFlip(), # added
data_keys=["input"],
)
elif version == 2:
# Similar to SimCLR: https://arxiv.org/abs/2002.05709
aug1 = aug2 = K.AugmentationSequential(
K.RandomResizedCrop(size=(size, size), scale=(0.2, 1)),
# Not appropriate for multispectral imagery, seasonal contrast used instead
# K.ColorJitter(
# brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1, p=0.8
# )
K.RandomBrightness(brightness=(0.6, 1.4), p=0.8),
K.RandomContrast(contrast=(0.6, 1.4), p=0.8),
T.RandomGrayscale(weights=weights, p=0.2),
K.RandomGaussianBlur(kernel_size=(ks, ks), sigma=(0.1, 2), p=0.5),
K.RandomHorizontalFlip(),
K.RandomVerticalFlip(), # added
data_keys=["input"],
)
else:
# Same as BYOL: https://arxiv.org/abs/2006.07733
aug1 = K.AugmentationSequential(
K.RandomResizedCrop(size=(size, size), scale=(0.08, 1)),
# Not appropriate for multispectral imagery, seasonal contrast used instead
# K.ColorJitter(
# brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1, p=0.8
# )
K.RandomBrightness(brightness=(0.6, 1.4), p=0.8),
K.RandomContrast(contrast=(0.6, 1.4), p=0.8),
T.RandomGrayscale(weights=weights, p=0.2),
K.RandomGaussianBlur(kernel_size=(ks, ks), sigma=(0.1, 2), p=1),
K.RandomHorizontalFlip(),
K.RandomVerticalFlip(), # added
data_keys=["input"],
)
aug2 = K.AugmentationSequential(
K.RandomResizedCrop(size=(size, size), scale=(0.08, 1)),
# Not appropriate for multispectral imagery, seasonal contrast used instead
# K.ColorJitter(
# brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1, p=0.8
# )
K.RandomBrightness(brightness=(0.6, 1.4), p=0.8),
K.RandomContrast(contrast=(0.6, 1.4), p=0.8),
T.RandomGrayscale(weights=weights, p=0.2),
K.RandomGaussianBlur(kernel_size=(ks, ks), sigma=(0.1, 2), p=0.1),
K.RandomSolarize(p=0.2),
K.RandomHorizontalFlip(),
K.RandomVerticalFlip(), # added
data_keys=["input"],
)
return aug1, aug2 |
Data augmentation used by SimCLR.
Args:
size: Size of patch to crop.
weights: Weight vector for grayscale computation.
Returns:
Data augmentation pipeline. | def simclr_augmentations(size: int, weights: Tensor) -> nn.Module:
"""Data augmentation used by SimCLR.
Args:
size: Size of patch to crop.
weights: Weight vector for grayscale computation.
Returns:
Data augmentation pipeline.
"""
# https://github.com/google-research/simclr/blob/master/data_util.py
ks = size // 10 // 2 * 2 + 1
return K.AugmentationSequential(
K.RandomResizedCrop(size=(size, size), ratio=(0.75, 1.33)),
K.RandomHorizontalFlip(),
K.RandomVerticalFlip(), # added
# Not appropriate for multispectral imagery, seasonal contrast used instead
# K.ColorJitter(brightness=0.8, contrast=0.8, saturation=0.8, hue=0.2, p=0.8)
K.RandomBrightness(brightness=(0.2, 1.8), p=0.8),
K.RandomContrast(contrast=(0.2, 1.8), p=0.8),
T.RandomGrayscale(weights=weights, p=0.2),
K.RandomGaussianBlur(kernel_size=(ks, ks), sigma=(0.1, 2)),
data_keys=["input"],
) |
Extracts a backbone from a lightning checkpoint file.
Args:
path: path to checkpoint file (.ckpt)
Returns:
tuple containing model name and state dict
Raises:
ValueError: if 'model' or 'backbone' not in
checkpoint['hyper_parameters']
.. versionchanged:: 0.4
Renamed from *extract_encoder* to *extract_backbone* | def extract_backbone(path: str) -> tuple[str, "OrderedDict[str, Tensor]"]:
"""Extracts a backbone from a lightning checkpoint file.
Args:
path: path to checkpoint file (.ckpt)
Returns:
tuple containing model name and state dict
Raises:
ValueError: if 'model' or 'backbone' not in
checkpoint['hyper_parameters']
.. versionchanged:: 0.4
Renamed from *extract_encoder* to *extract_backbone*
"""
checkpoint = torch.load(path, map_location=torch.device("cpu"))
if "model" in checkpoint["hyper_parameters"]:
name = checkpoint["hyper_parameters"]["model"]
state_dict = checkpoint["state_dict"]
state_dict = OrderedDict({k: v for k, v in state_dict.items() if "model." in k})
state_dict = OrderedDict(
{k.replace("model.", ""): v for k, v in state_dict.items()}
)
elif "backbone" in checkpoint["hyper_parameters"]:
name = checkpoint["hyper_parameters"]["backbone"]
state_dict = checkpoint["state_dict"]
state_dict = OrderedDict(
{k: v for k, v in state_dict.items() if "model.backbone.model" in k}
)
state_dict = OrderedDict(
{k.replace("model.backbone.model.", ""): v for k, v in state_dict.items()}
)
else:
raise ValueError(
"Unknown checkpoint task. Only backbone or model extraction is supported"
)
return name, state_dict |
Retrieve the input layer name and module from a timm model.
Args:
model: timm model | def _get_input_layer_name_and_module(model: Module) -> tuple[str, Module]:
"""Retrieve the input layer name and module from a timm model.
Args:
model: timm model
"""
keys = []
children = list(model.named_children())
while children != []:
name, module = children[0]
keys.append(name)
children = list(module.named_children())
key = ".".join(keys)
return key, module |
Load pretrained resnet weights to a model.
Args:
model: model to load the pretrained weights to
state_dict: dict containing tensor parameters
Returns:
The missing and unexpected keys
Warns:
If input channels in model != pretrained model input channels
If num output classes in model != pretrained model num classes | def load_state_dict(
model: Module, state_dict: "OrderedDict[str, Tensor]"
) -> tuple[list[str], list[str]]:
"""Load pretrained resnet weights to a model.
Args:
model: model to load the pretrained weights to
state_dict: dict containing tensor parameters
Returns:
The missing and unexpected keys
Warns:
If input channels in model != pretrained model input channels
If num output classes in model != pretrained model num classes
"""
input_module_key, input_module = _get_input_layer_name_and_module(model)
in_channels = input_module.in_channels
expected_in_channels = state_dict[input_module_key + ".weight"].shape[1]
output_module_key, output_module = list(model.named_children())[-1]
if isinstance(output_module, nn.Identity):
num_classes = model.num_features
else:
num_classes = output_module.out_features
expected_num_classes = None
if output_module_key + ".weight" in state_dict:
expected_num_classes = state_dict[output_module_key + ".weight"].shape[0]
if in_channels != expected_in_channels:
warnings.warn(
f"input channels {in_channels} != input channels in pretrained"
f" model {expected_in_channels}. Overriding with new input channels"
)
del state_dict[input_module_key + ".weight"]
if expected_num_classes and num_classes != expected_num_classes:
warnings.warn(
f"num classes {num_classes} != num classes in pretrained model"
f" {expected_num_classes}. Overriding with new num classes"
)
del (
state_dict[output_module_key + ".weight"],
state_dict[output_module_key + ".bias"],
)
missing_keys: list[str]
unexpected_keys: list[str]
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
return missing_keys, unexpected_keys |
Clones a Conv2d layer while optionally retaining some of the original weights.
When replacing the first convolutional layer in a model with one that operates over
a different number of input channels, we sometimes want to keep a subset of the kernel
weights the same (e.g. the RGB weights of an ImageNet-pretrained model). This is a
convenience function that does exactly that.
Args:
layer: the Conv2d layer to initialize
new_in_channels: the new number of input channels
keep_rgb_weights: flag indicating whether to keep the original weights of the first 3 channels
new_stride: optionally, overwrites the ``layer``'s stride with this value
new_padding: optionally, overwrites the ``layer``'s padding with this value
Returns:
a Conv2d layer with new kernel weights | def reinit_initial_conv_layer(
layer: Conv2d,
new_in_channels: int,
keep_rgb_weights: bool,
new_stride: int | tuple[int, int] | None = None,
new_padding: str | int | tuple[int, int] | None = None,
) -> Conv2d:
"""Clones a Conv2d layer while optionally retaining some of the original weights.
When replacing the first convolutional layer in a model with one that operates over
a different number of input channels, we sometimes want to keep a subset of the kernel
weights the same (e.g. the RGB weights of an ImageNet-pretrained model). This is a
convenience function that does exactly that.
Args:
layer: the Conv2d layer to initialize
new_in_channels: the new number of input channels
keep_rgb_weights: flag indicating whether to keep the original weights of the first 3 channels
new_stride: optionally, overwrites the ``layer``'s stride with this value
new_padding: optionally, overwrites the ``layer``'s padding with this value
Returns:
a Conv2d layer with new kernel weights
"""
use_bias = layer.bias is not None
if keep_rgb_weights:
w_old = layer.weight.data[:, :3, :, :].clone()
if use_bias:
b_old = cast(Tensor, layer.bias).data.clone()
updated_stride = layer.stride if new_stride is None else new_stride
updated_padding = layer.padding if new_padding is None else new_padding
new_layer = Conv2d(
new_in_channels,
layer.out_channels,
kernel_size=layer.kernel_size, # type: ignore[arg-type]
stride=updated_stride, # type: ignore[arg-type]
padding=updated_padding, # type: ignore[arg-type]
dilation=layer.dilation, # type: ignore[arg-type]
groups=layer.groups,
bias=use_bias,
padding_mode=layer.padding_mode,
)
nn.init.kaiming_normal_(new_layer.weight, mode="fan_out", nonlinearity="relu")
if keep_rgb_weights:
new_layer.weight.data[:, :3, :, :] = w_old
if use_bias:
cast(Tensor, new_layer.bias).data = b_old
return new_layer |
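Example (assumed usage): expand a standard RGB ResNet stem to 5 input channels while keeping the original RGB kernels; the two extra channels receive Kaiming initialization.
import torch
from torch.nn import Conv2d

old = Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
new = reinit_initial_conv_layer(old, new_in_channels=5, keep_rgb_weights=True)
print(new.weight.shape)  # torch.Size([64, 5, 7, 7])
assert torch.equal(new.weight[:, :3], old.weight)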
Parse boolean arguments from the command line. | def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
FALSY_STRINGS = {"off", "false", "0"}
TRUTHY_STRINGS = {"on", "true", "1"}
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag") |
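Typical argparse wiring (the flag name is illustrative):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--use-ema", type=bool_flag, default=False)
print(parser.parse_args(["--use-ema", "true"]).use_ema)  # True
print(parser.parse_args(["--use-ema", "off"]).use_ema)   # False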
Workaround for ModelEma._load_checkpoint to accept an already-loaded object | def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file) |
This function disables printing when not in master process | def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print |
A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy. | def duplicate_interleave(m):
"""
A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
"""
dim0 = m.shape[0]
m = m.view(-1, 1) # flatten the matrix
m = m.repeat(1, 2) # repeat all elements into the 2nd dimension
m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy
return m |
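For example, each element of a row is repeated once, interleaved:
import torch

m = torch.tensor([[1, 2], [3, 4]])
print(duplicate_interleave(m))
# tensor([[1, 1, 2, 2],
#         [3, 3, 4, 4]])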
Implements Top1Gating on logits. | def top1gating(
logits: torch.Tensor,
input_mask: Optional[torch.Tensor] = None,
use_fp32=False,
capacity_factor=1.0,
eval_mode=False,
moe_eval_capacity_token_fraction=EVAL_CAPACITY_TOKEN_FRACTION,
use_xmoe=False,
gate_obj=None,
) -> Tuple[Tensor, Tensor, Tensor, Dict]:
"""Implements Top2Gating on logits."""
metadata = {}
if use_fp32:
orig_dtype = logits.dtype
logits = logits.float()
gates = F.softmax(logits, dim=1)
metadata["entropy_gating"] = entropy(probs=gates).mean().detach()
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
if moe_eval_capacity_token_fraction > 0.0 and eval_mode:
capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)
else:
# capacity = capacity_factor * S/E
capacity = int(capacity_factor * math.ceil(num_tokens / num_experts))
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1)
mask1 = one_hot(indices1_s, num_classes=num_experts, unsqueeze_indices=True)
if input_mask is not None and input_mask.any():
nonpadding = ~input_mask
mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)
# for logging (percent of tokens routed to each expert)
expert1_hist = (
100
* torch.histc(
(indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert1_count"] = (expert1_hist == 0).sum()
expert1_hist = (
torch.sort(expert1_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)
metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum()
metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum()
gates1_s = (gates * mask1).sum(dim=1)
# Compute locations in capacity buffer
locations1 = fused_cumsum_sub_one(mask1)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.to(gates.dtype), dim=0)
l_aux = torch.mean(me * ce)
l_aux = l_aux * num_experts * num_experts
if has_tutel:
locations1_s = torch.sum(locations1 * mask1, dim=1)
return (
l_aux,
metadata,
capacity,
num_experts,
[
indices1_s,
],
[
locations1_s,
],
[
gates1_s,
],
)
# Remove locations outside capacity from mask
mask1 = mask1 * torch.lt(locations1, capacity)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se")
# locations1_sc = num_tokens * capacity
locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)
combine1_sec = torch.bmm(
# einsum("se,sc->sec")
gates1.unsqueeze(-1),
locations1_sc.to(gates1.dtype).unsqueeze(1),
)
dispatch_mask = combine1_sec.bool()
if use_fp32:
return l_aux, combine1_sec.to(orig_dtype), dispatch_mask, metadata
else:
return l_aux, combine1_sec, dispatch_mask, metadata |
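The capacity bookkeeping above reduces to a one-liner, restated here for illustration rather than as a standalone gating example: with 8 tokens, 4 experts, and a capacity factor of 1.0, each expert accepts at most 2 tokens.
import math

num_tokens, num_experts, capacity_factor = 8, 4, 1.0
capacity = int(capacity_factor * math.ceil(num_tokens / num_experts))
print(capacity)  # 2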
Implements Top2Gating on logits. | def top2gating(
logits: torch.Tensor,
input_mask: Optional[torch.Tensor] = None,
use_fp32=False,
second_expert_policy="sampling",
normalize_gate_prob_before_dropping=False,
eval_mode=False,
moe_eval_capacity_token_fraction=0.25,
batch_prioritized_routing=False,
) -> Tuple[Tensor, Tensor, Tensor]:
"""Implements Top2Gating on logits."""
metadata = {}
if use_fp32:
orig_dtype = logits.dtype
logits = logits.float()
gates = F.softmax(logits, dim=1)
metadata["entropy_gating"] = entropy(probs=gates).mean().detach()
# gates has shape of SE
num_tokens = gates.shape[0]
num_experts = gates.shape[1]
if moe_eval_capacity_token_fraction > 0.0 and eval_mode:
capacity = math.ceil(moe_eval_capacity_token_fraction * num_tokens)
else:
# capacity = 2S/E
capacity = 2 * math.ceil(num_tokens / num_experts)
# Create a mask for 1st's expert per token
indices1_s = torch.argmax(gates, dim=1, keepdim=True)
mask1 = one_hot(indices1_s, num_experts)
if second_expert_policy == "sampling":
# Create a mask for 2nd's expert per token using Gumbel-max trick
# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/
logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)
else:
logits_w_noise = logits
# Replace top-expert with min value
logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float("-inf"))
indices2_s = torch.argmax(logits_except1, dim=1, keepdim=True)
mask2 = one_hot(indices2_s, num_experts)
gates1_s = (gates * mask1).sum(dim=1)
gates2_s = (gates * mask2).sum(dim=1)
if normalize_gate_prob_before_dropping:
# Normalize gate probabilities
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s = gates1_s / denom_s
gates2_s = gates2_s / denom_s
if second_expert_policy == "random":
sampled = (2 * gates2_s) > torch.rand_like(gates2_s)
mask2 = mask2 * sampled.repeat(num_experts, 1).transpose(1, 0)
# Compute locations in capacity buffer
if input_mask is not None and input_mask.any():
nonpadding = ~input_mask
mask1 = mask1 * nonpadding.unsqueeze(-1).to(mask1.dtype)
mask2 = mask2 * nonpadding.unsqueeze(-1).to(mask1.dtype)
if batch_prioritized_routing:
# if batch_prioritized_routing:
importance_scores = -1 * gates.max(dim=1)[0]
sorted_mask1 = mask1[importance_scores.argsort(dim=0)]
sorted_cumsum1 = fused_cumsum_sub_one(sorted_mask1) * sorted_mask1
importance_sorted_locations1 = sorted_cumsum1[
importance_scores.argsort(dim=0).argsort(dim=0)
]
sorted_mask2 = mask2[importance_scores.argsort(dim=0)]
sorted_cumsum2 = fused_cumsum_sub_one(sorted_mask2) * sorted_mask2
importance_sorted_locations2 = sorted_cumsum2[
importance_scores.argsort(dim=0).argsort(dim=0)
]
importance_sorted_locations2 += torch.sum(mask1, dim=0, keepdim=True)
locations1, locations2 = (
importance_sorted_locations1,
importance_sorted_locations2,
)
else:
locations1 = fused_cumsum_sub_one(mask1)
locations2 = fused_cumsum_sub_one(mask2)
# Update 2nd's location by accounting for locations of 1st
locations2 += torch.sum(mask1, dim=0, keepdim=True)
# Compute l_aux
me = torch.mean(gates, dim=0)
ce = torch.mean(mask1.to(gates.dtype), dim=0)
l_aux = torch.mean(me * ce)
l_aux = l_aux * num_experts * num_experts
# for logging purposes
metadata["overflow_expert1"] = (
100 * torch.sum(mask1 * torch.ge(locations1, capacity)) / torch.sum(mask1)
)
metadata["overflow_expert2"] = (
100 * torch.sum(mask2 * torch.ge(locations2, capacity)) / torch.sum(mask2)
)
# Remove locations outside capacity from mask
mask1_, mask2_ = mask1, mask2
mask1 = mask1 * torch.lt(locations1, capacity)
mask2 = mask2 * torch.lt(locations2, capacity)
# for logging (percent of tokens routed to each expert)
expert1_hist = (
100
* torch.histc(
(indices1_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert1_count"] = (expert1_hist == 0).sum()
expert1_hist = (
torch.sort(expert1_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
expert2_hist = (
100
* torch.histc(
(indices2_s.squeeze() + 1), bins=num_experts, min=1, max=num_experts
)
/ num_tokens
)
metadata["unused_expert2_count"] = (expert2_hist == 0).sum()
expert2_hist = (
torch.sort(expert2_hist, dim=0, descending=True).values
+ torch.finfo(torch.float32).tiny
)
sample_count = max(math.ceil(num_experts * SAMPLE_FRACTION), 1)
metadata["expert1_balance_top"] = expert1_hist[:sample_count].sum()
metadata["expert1_balance_bottom"] = expert1_hist[-sample_count:].sum()
metadata["expert2_balance_top"] = expert2_hist[:sample_count].sum()
metadata["expert2_balance_bottom"] = expert2_hist[-sample_count:].sum()
if not normalize_gate_prob_before_dropping:
# Normalize gate probabilities
gates1_s = (gates * mask1).sum(dim=1)
gates2_s = (gates * mask2).sum(dim=1)
denom_s = gates1_s + gates2_s
# Avoid divide-by-zero
denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)
gates1_s /= denom_s
gates2_s /= denom_s
if has_tutel:
locations1_s = torch.sum(locations1 * mask1_, dim=1)
locations2_s = torch.sum(locations2 * mask2_, dim=1)
return (
l_aux,
metadata,
capacity,
num_experts,
[indices1_s, indices2_s],
[locations1_s, locations2_s],
[gates1_s, gates2_s],
)
# Store the capacity location for each token
locations1_s = torch.sum(locations1 * mask1, dim=1)
locations2_s = torch.sum(locations2 * mask2, dim=1)
# Calculate combine_weights and dispatch_mask
gates1 = gates1_s.unsqueeze(-1) * mask1.to(gates1_s.dtype) # einsum("s,se->se")
gates2 = gates2_s.unsqueeze(-1) * mask2.to(gates2_s.dtype) # einsum("s,se->se")
locations1_sc = one_hot(locations1_s, num_classes=capacity, unsqueeze_indices=True)
locations2_sc = one_hot(locations2_s, num_classes=capacity, unsqueeze_indices=True)
combine1_sec = torch.bmm(
# einsum("se,sc->sec")
gates1.unsqueeze(-1),
locations1_sc.to(gates1.dtype).unsqueeze(1),
)
combine2_sec = torch.bmm(
# einsum("se,sc->sec")
gates2.unsqueeze(-1),
locations2_sc.to(gates2.dtype).unsqueeze(1),
)
combine_weights = combine1_sec + combine2_sec
dispatch_mask = combine_weights.bool()
if use_fp32:
return l_aux, combine_weights.to(orig_dtype), dispatch_mask, metadata
else:
return l_aux, combine_weights, dispatch_mask, metadata |
Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process | def _oauth_signature(
consumer_token: Dict[str, Any],
method: str,
url: str,
parameters: Dict[str, Any] = {},
token: Optional[Dict[str, Any]] = None,
) -> bytes:
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
"""
parts = urllib.parse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append(
"&".join(
"%s=%s" % (k, _oauth_escape(str(v))) for k, v in sorted(parameters.items())
)
)
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(consumer_token["secret"])]
key_elems.append(escape.utf8(token["secret"] if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1] |
Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
See http://oauth.net/core/1.0a/#signing_process | def _oauth10a_signature(
consumer_token: Dict[str, Any],
method: str,
url: str,
parameters: Dict[str, Any] = {},
token: Optional[Dict[str, Any]] = None,
) -> bytes:
"""Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
See http://oauth.net/core/1.0a/#signing_process
"""
parts = urllib.parse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append(
"&".join(
"%s=%s" % (k, _oauth_escape(str(v))) for k, v in sorted(parameters.items())
)
)
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(urllib.parse.quote(consumer_token["secret"], safe="~"))]
key_elems.append(
escape.utf8(urllib.parse.quote(token["secret"], safe="~") if token else "")
)
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1] |
Begins watching source files for changes.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed. | def start(check_time: int = 500) -> None:
"""Begins watching source files for changes.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
io_loop = ioloop.IOLoop.current()
if io_loop in _io_loops:
return
_io_loops[io_loop] = True
if len(_io_loops) > 1:
gen_log.warning("tornado.autoreload started more than once in the same process")
modify_times: Dict[str, float] = {}
callback = functools.partial(_reload_on_update, modify_times)
scheduler = ioloop.PeriodicCallback(callback, check_time)
scheduler.start() |
Wait for a watched file to change, then restart the process.
Intended to be used at the end of scripts like unit test runners,
to run the tests again after any source file changes (but see also
the command-line interface in `main`) | def wait() -> None:
"""Wait for a watched file to change, then restart the process.
Intended to be used at the end of scripts like unit test runners,
to run the tests again after any source file changes (but see also
the command-line interface in `main`)
"""
io_loop = ioloop.IOLoop()
io_loop.add_callback(start)
io_loop.start() |
Add a file to the watch list.
All imported modules are watched by default. | def watch(filename: str) -> None:
"""Add a file to the watch list.
All imported modules are watched by default.
"""
_watched_files.add(filename) |
Add a function to be called before reloading the process.
Note that for open file and socket handles it is generally
preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
`os.set_inheritable`) instead of using a reload hook to close them. | def add_reload_hook(fn: Callable[[], None]) -> None:
"""Add a function to be called before reloading the process.
Note that for open file and socket handles it is generally
preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or
`os.set_inheritable`) instead of using a reload hook to close them.
"""
_reload_hooks.append(fn) |
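A minimal wiring sketch for these autoreload helpers, assuming they are in scope as above; the watched path and the hook body are illustrative.
from tornado import ioloop

watch("config/settings.yaml")                 # extra non-module file to monitor
add_reload_hook(lambda: print("reloading"))   # runs before the process re-execs
start()
ioloop.IOLoop.current().start()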