inputs
stringlengths 312
52k
| targets
stringlengths 1
3.1k
β | block_type
stringclasses 11
values | scenario
stringclasses 7
values |
---|---|---|---|
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def sample_intervals(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
domain=(-jnp.inf, jnp.inf),
):
"""Sample *intervals* (rather than points) from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of intervals to sample.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
domain: (minval, maxval), the range of valid values for `t`.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
if num_samples <= 1:
raise ValueError(f'num_samples must be > 1, is {num_samples}.')
# Sample a set of points from the step function.
centers = sample(
rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
)
# The intervals we return will span the midpoints of each adjacent sample.
mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2
# Each first/last fencepost is the reflection of the first/last midpoint
# around the first/last sampled center.
first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
samples = jnp.concatenate([first, mid, last], axis=-1)
# We clamp to the limits of the input domain, provided by the caller.
samples = jnp.clip(samples, *domain)
return samples
# camp_zipnerf/internal/spin_math.py
def safe_sqrt(x,
*,
eps = jnp.finfo(jnp.float32).eps,
value_at_zero = 0.0):
"""A safe version of jnp.sqrt that avoid evaluating at zero.
Note: sqrt(x) = sqrt(eps) = 3e-4 when x < eps = 1.19e-7.
Args:
x: The operand.
eps: A small number to prevent NaNs.
value_at_zero: The value to clamp x to near zero. The return value will be
sqrt(value_at_zero)
Returns:
The sqrt(x), or sqrt(value_at_zero) near zero.
"""
safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
return jnp.sqrt(safe_x)
# camp_zipnerf/internal/rigid_body.py
def ortho6d_from_rotation_matrix(rotation_matrix):
"""Converts a matrix to an ortho6d by taking the first two columns."""
return rotation_matrix[Ellipsis, :2, :].reshape(*rotation_matrix.shape[:-2], 6)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for constructing geodesic polyhedron, which are used as a basis."""
import itertools
import numpy as np
def compute_sq_dist(mat0, mat1=None):
"""Compute the squared Euclidean distance between all pairs of columns."""
if mat1 is None:
mat1 = mat0
# Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y.
sq_norm0 = np.sum(mat0**2, 0)
sq_norm1 = np.sum(mat1**2, 0)
sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors.
return sq_dist
def compute_tesselation_weights(v):
"""Tesselate the vertices of a triangle by a factor of `v`."""
if v < 1:
raise ValueError(f'v {v} must be >= 1')
int_weights = []
for i in range(v + 1):
for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j)))
int_weights = np.array(int_weights)
weights = int_weights / v # Barycentric weights.
return weights
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
"""Tesselate the vertices of a geodesic polyhedron.
Args:
base_verts: tensor of floats, the vertex coordinates of the geodesic.
base_faces: tensor of ints, the indices of the vertices of base_verts that
constitute eachface of the polyhedra.
v: int, the factor of the tesselation (v==1 is a no-op).
eps: float, a small value used to determine if two vertices are the same.
Returns:
verts: a tensor of floats, the coordinates of the tesselated vertices.
"""
if not isinstance(v, int):
raise ValueError(f'v {v} must an integer')
tri_weights = compute_tesselation_weights(v)
verts = []
for base_face in base_faces:
new_verts = np.matmul(tri_weights, base_verts[base_face, :])
new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
verts.append(new_verts)
verts = np.concatenate(verts, 0)
sq_dist = compute_sq_dist(verts.T)
assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
unique = np.unique(assignment)
verts = verts[unique, :]
return verts
def generate_basis(
base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
"""Generates a 3D basis by tesselating a geometric polyhedron.
Args:
base_shape: string, the name of the starting polyhedron, must be either
'tetrahedron', 'icosahedron' or 'octahedron'.
angular_tesselation: int, the number of times to tesselate the polyhedron,
must be >= 1 (a value of 1 is a no-op to the polyhedron).
remove_symmetries: bool, if True then remove the symmetric basis columns,
which is usually a good idea because otherwise projections onto the basis
will have redundant negative copies of each other.
eps: float, a small number used to determine symmetries.
Returns:
basis: a matrix with shape [3, n].
"""
if base_shape == 'tetrahedron':
verts = np.array([
(np.sqrt(8 / 9), 0, -1 / 3),
(-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
(-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
(0, 0, 1),
])
faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
elif base_shape == 'icosahedron':
a = (np.sqrt(5) + 1) / 2
verts = np.array([
(-1, 0, a),
(1, 0, a),
(-1, 0, -a),
(1, 0, -a),
(0, a, 1),
(0, a, -1),
(0, -a, 1),
(0, -a, -1),
(a, 1, 0),
(-a, 1, 0),
(a, -1, 0),
(-a, -1, 0),
]) / np.sqrt(a + 2)
faces = np.array([
(0, 4, 1),
(0, 9, 4),
(9, 5, 4),
(4, 5, 8),
(4, 8, 1),
(8, 10, 1),
(8, 3, 10),
(5, 3, 8),
(5, 2, 3),
(2, 7, 3),
(7, 10, 3),
(7, 6, 10),
(7, 11, 6),
(11, 0, 6),
(0, 1, 6),
(6, 1, 10),
(9, 0, 11),
(9, 11, 2),
(9, 2, 5),
(7, 2, 11),
])
elif base_shape == 'octahedron':
verts = np.array(
[(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
)
corners = np.array(list(itertools.product([-1, 1], repeat=3)))
pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
else:
raise ValueError(f'base_shape {base_shape} not supported')
verts = tesselate_geodesic(verts, faces, angular_tesselation)
<fim_suffix>
basis = verts[:, ::-1]
return basis
<fim_middle>if remove_symmetries:
# Remove elements of `verts` that are reflections of each other.
match = compute_sq_dist(verts.T, -verts.T) < eps
verts = verts[~np.any(np.triu(match), axis=0), :] | if remove_symmetries:
# Remove elements of `verts` that are reflections of each other.
match = compute_sq_dist(verts.T, -verts.T) < eps
verts = verts[~np.any(np.triu(match), axis=0), :] | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/coord.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/render.py
def gaussianize_frustum(t0, t1):
"""Convert intervals along a conical frustum into means and variances."""
# A more stable version of Equation 7 from https://arxiv.org/abs/2103.13415.
s = t0 + t1
d = t1 - t0
eps = jnp.finfo(jnp.float32).eps ** 2
ratio = d**2 / jnp.maximum(eps, 3 * s**2 + d**2)
t_mean = s * (1 / 2 + ratio)
t_var = (1 / 12) * d**2 - (1 / 15) * ratio**2 * (12 * s**2 - d**2)
r_var = (1 / 16) * s**2 + d**2 * (5 / 48 - (1 / 15) * ratio)
return t_mean, t_var, r_var
# camp_zipnerf/internal/train_utils.py
def cosine_sequential_grid_weight_fn(step: int) -> chex.Array:
target_grid_size = target_resolution_fn(step)
log_grid_size = jnp.log2(grid_size)
# Compute how far off the current target size is from the previous
# scale. To make the math simple, we assume that the grid sizes are
# ideal (i.e., they might be fractional).
scale_diff_to_target = jnp.log2(target_grid_size) - (
log_grid_size - 1 / scale_supersample
)
weight = jnp.clip(scale_supersample * scale_diff_to_target, 0.0, 1.0)
# Make the window cosiney.
weight = 0.5 * (1 + jnp.cos(jnp.pi * weight + jnp.pi))
return weight
# camp_zipnerf/internal/math.py
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
"""Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
# Clamping to 1 produces correct scale inside |x| < 1
x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq
z = scale * x
return z
def inv_contract(z):
"""The inverse of contract()."""
# Clamping to 1 produces correct scale inside |z| < 1
z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq
x = z / inv_scale
return x
def track_linearize(fn, mean, cov):
"""Apply function `fn` to a set of means and covariances, ala a Kalman filter.
We can analytically transform a Gaussian parameterized by `mean` and `cov`
with a function `fn` by linearizing `fn` around `mean`, and taking advantage
of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
cov: a tensor of covariances, where the last two axes are the dimensions.
Returns:
fn_mean: the transformed means.
fn_cov: the transformed covariances.
"""
if (len(mean.shape) + 1) != len(cov.shape):
raise ValueError('cov must be non-diagonal')
fn_mean, lin_fn = jax.linearize(fn, mean)
fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov))
return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
"""Apply function `fn` to a set of means and scales, ala a Kalman filter.
This is the isotropic or scalar equivalent of track_linearize, as we're still
linearizing a function and tracking a Gaussian through it, but the input and
output Gaussians are all isotropic and are only represented with a single
`scale` value (where `scale**2` is the variance of the Gaussian).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
scale: a tensor of scales, with the same shape as means[..., -1].
Returns:
fn_mean: the transformed means.
fn_scale: the transformed scales.
"""
if mean.shape[:-1] != scale.shape:
raise ValueError(
f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
)
d = mean.shape[-1]
fn_mean, lin_fn = jax.linearize(fn, mean)
if scale is not None:
# Compute the Jacobian of fn function at the locations of each mean.
jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
)
# The cube root of the determinant of the Jacobian is the geometric mean
# of the eigenvalues of the Jacobian, which gives us the isotropic scaling
# implied by `fn` at each mean that `scale` should be multiplied by.
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0.
abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
# Special case d == 3 for speed's sake.
fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
else:
fn_scale = None
return fn_mean, fn_scale
def contract3_isoscale(x):
"""A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
if x.shape[-1] != 3:
raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
# Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq:
return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq))
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
"""Construct a bijection between metric distances and normalized distances.
See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
detailed explanation.
Args:
fn: the function to ray distances.
t_near: a tensor of near-plane distances.
t_far: a tensor of far-plane distances.
fn_inv: Optional, if not None then it's used as the inverse of fn().
Returns:
t_to_s: a function that maps distances to normalized distances in [0, 1].
s_to_t: the inverse of t_to_s.
"""
if fn is None:
fn_fwd = lambda x: x
fn_inv = lambda x: x
else:
fn_fwd = fn
<fim_suffix>
fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)]
# Forcibly clip t to the range of valid values, to guard against inf's.
t_clip = lambda t: jnp.clip(t, t_near, t_far)
t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)
s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))
return t_to_s, s_to_t
def expected_sin(mean, var):
"""Compute the mean of sin(x), x ~ N(mean, var)."""
return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value.
def integrated_pos_enc(mean, var, min_deg, max_deg):
"""Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).
Args:
mean: tensor, the mean coordinates to be encoded
var: tensor, the variance of the coordinates to be encoded.
min_deg: int, the min degree of the encoding.
max_deg: int, the max degree of the encoding.
Returns:
encoded: jnp.ndarray, encoded variables.
"""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = mean.shape[:-1] + (-1,)
scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape)
scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape)
return expected_sin(
jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1),
jnp.concatenate([scaled_var] * 2, axis=-1),
)
def lift_and_diagonalize(mean, cov, basis):
"""Project `mean` and `cov` onto basis and diagonalize the projected cov."""
fn_mean = math.matmul(mean, basis)
fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
return fn_mean, fn_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
"""The positional encoding used by the original NeRF paper."""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = x.shape[:-1] + (-1,)
scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c).
scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c).
# Note that we're not using safe_sin, unlike IPE.
# (..., s*c + s*c).
four_feat = jnp.sin(
jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1)
)
if append_identity:
return jnp.concatenate([x, four_feat], axis=-1)
else:
return four_feat
def sqrtm(mat, return_eigs=False):
"""Take the matrix square root of a PSD matrix [..., d, d]."""
eigvec, eigval = jax.lax.linalg.eigh(
mat, symmetrize_input=False, sort_eigenvalues=False
)
scaling = math.safe_sqrt(eigval)[Ellipsis, None, :]
sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat
def isotropize(cov, mode='accurate'):
"""Turn covariances into isotropic covariances with the same determinant."""
d = cov.shape[-1]
if d == 1:
return cov
if mode == 'fast':
det = jnp.linalg.det(cov)
diag_val = det ** (1 / d)
is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
elif mode == 'accurate':
log_det = jnp.linalg.slogdet(cov)[1]
diag_val = jnp.exp(log_det / d)
is_invalid = ~jnp.isfinite(log_det)
else:
raise ValueError(f'mode={mode} not implemented.')
cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
# Guard against NaN outputs when `det` is super small. Note that this does not
# guard against NaN gradients!
cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
return cov_iso
def construct_perp_basis(directions):
"""Construct a perpendicular basis for each 3-vector in `directions`."""
if directions.shape[-1] != 3:
raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
# To generate a vector perpendicular to `directions`, we take a cross-product
# with an arbitrary vector [0, 0, 1].
cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
# In the rare case that `directions` is very close to [0, 0, 1], we compute an
# alternate cross-product with [1, 1, 1] to use instead.
cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1)
cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a)
# Crossing `directions` with `cross1` gives us our 3rd vector.
cross2 = jnp.cross(directions, cross1)
# Normalize vectors before returning them.
normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))
return normalize(cross1), normalize(cross2)
def hexify(rng, *, origins, directions, radii, tdist):
"""Produce hexagon-shaped samples from ray segments."""
# Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
# This is one of two orderings of angles that doesn't induce any anisotropy
# into the sample covariance of the multisample coordinates. Any rotation and
# mirroring along the z-axis of this ordering is also valid.
# There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
# This seems to work less well though likely because of the strong correlation
# between adjacent angles.
thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
# Lift the angles to the size of the rays.
sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
thetas = jnp.broadcast_to(thetas, sz)
if rng is not None:
# Randomly reverse the order of half of the hexes.
key, rng = random.split(rng)
flip = random.bernoulli(key, shape=sz[:-1])
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
# Rotate each hex by some random amount.
key, rng = random.split(rng)
thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
else:
# If we're deterministic, flip and shift every other hex by 30 degrees.
flip = jnp.arange(thetas.shape[-2]) % 2
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
thetas += (flip * jnp.pi / 6)[Ellipsis, None]
# TODO(barron): Plumb through the dx/dy frame for the original ray in the
# image plane, to avoid the need of this.
perp_axis1, perp_axis2 = construct_perp_basis(directions)
# Grab each t-interval's midpoint and half-width.
t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
s = (t0 + t1) / 2
d = (t1 - t0) / 2
# Compute the length along the ray for each multisample, using mip-NeRF math.
cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
(t1**2 + 2 * s**2)[Ellipsis, None]
+ (3 / np.sqrt(7))
* (np.arange(6) * (2 / 5) - 1)
* math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
)
# Compute the offset from the ray for each multisample.
perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
# Go from ray coordinate to world coordinates.
cx = perp_mag * jnp.cos(thetas)
cy = perp_mag * jnp.sin(thetas)
control = (
origins[Ellipsis, None, None, :]
+ perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
+ perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
+ directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
)
return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
"""Construct "sigma points" along `axis` from each mean and covariance."""
d = cov.shape[-1]
mean_ex = jnp.expand_dims(mean, axis)
if basis == 'mean':
# This effectively disables the unscented transform.
return mean_ex
if basis.startswith('random_'):
num_random = int(basis.split('_')[-1])
# TODO(barron): use a non-fixed random seed?
noise = random.multivariate_normal(
random.PRNGKey(0),
jnp.zeros_like(mean),
cov,
(num_random,) + mean.shape[:-1],
)
control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
return control
sqrtm_cov = sqrtm(cov)
if any([
basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
]):
# Use tessellated regular polyhedra vertices (and vec(0)) as control points.
if d != 3:
raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
base_shape, angular_tesselation = basis.split('_')
transform = geopoly.generate_basis(
base_shape, int(angular_tesselation), remove_symmetries=False
).T
transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
elif basis == 'julier':
# The most basic symmetric unscented transformation from the original paper,
# which yields 2*d+1 control points.
offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
control = jnp.concatenate(
[mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
)
elif basis == 'menegaz':
# A compact unscented transformation from
# folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
# which yields d+1 control points.
if d == 3:
# A hand-optimized version of the d==3 case.
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
offsets = jnp.concatenate(
[-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
)
control = mean_ex + jnp.moveaxis(offsets, -1, axis)
else:
transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
# == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
else:
raise ValueError(f'basis={basis} not implemented.')
return control
def compute_control_points(
means,
covs,
rays,
tdist,
rng,
unscented_mip_basis,
unscented_scale_mult,
):
"""Wrapper to compute unscented control points for the MLP class."""
if unscented_mip_basis == 'hexify':
control, perp_mag = hexify(
rng,
origins=rays.origins,
directions=rays.directions,
radii=rays.radii,
tdist=tdist,
)
else:
# Use a normal unscented transformation.
control = unscented_transform(
means,
covs,
basis=unscented_mip_basis,
axis=-2,
)
if unscented_scale_mult > 0:
if rays is None:
raise SyntaxError(
'Rays are required as input if unscented_scale_mult > 0.'
)
# Mimic the math used by hexify to produce comparable scales.
t_recon = jnp.sum(
(control - rays.origins[Ellipsis, None, None, :])
* rays.directions[Ellipsis, None, None, :],
axis=-1,
)
perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
else:
perp_mag = None
return control, perp_mag
<fim_middle>if fn_inv is None:
# A simple mapping from some functions to their inverse.
inv_mapping = {
'reciprocal': jnp.reciprocal,
'log': jnp.exp,
'exp': jnp.log,
'sqrt': jnp.square,
'square': jnp.sqrt,
}
fn_inv = inv_mapping[fn.__name__] | if fn_inv is None:
# A simple mapping from some functions to their inverse.
inv_mapping = {
'reciprocal': jnp.reciprocal,
'log': jnp.exp,
'exp': jnp.log,
'sqrt': jnp.square,
'square': jnp.sqrt,
}
fn_inv = inv_mapping[fn.__name__] | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/math.py
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
# camp_zipnerf/internal/stepfun.py
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
# The loss incurred between all pairs of intervals.
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
# The loss incurred within each individual interval with itself.
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
# camp_zipnerf/internal/math.py
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for constructing geodesic polyhedron, which are used as a basis."""
import itertools
import numpy as np
def compute_sq_dist(mat0, mat1=None):
"""Compute the squared Euclidean distance between all pairs of columns."""
<fim_suffix>
# Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y.
sq_norm0 = np.sum(mat0**2, 0)
sq_norm1 = np.sum(mat1**2, 0)
sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors.
return sq_dist
def compute_tesselation_weights(v):
"""Tesselate the vertices of a triangle by a factor of `v`."""
if v < 1:
raise ValueError(f'v {v} must be >= 1')
int_weights = []
for i in range(v + 1):
for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j)))
int_weights = np.array(int_weights)
weights = int_weights / v # Barycentric weights.
return weights
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
"""Tesselate the vertices of a geodesic polyhedron.
Args:
base_verts: tensor of floats, the vertex coordinates of the geodesic.
base_faces: tensor of ints, the indices of the vertices of base_verts that
constitute eachface of the polyhedra.
v: int, the factor of the tesselation (v==1 is a no-op).
eps: float, a small value used to determine if two vertices are the same.
Returns:
verts: a tensor of floats, the coordinates of the tesselated vertices.
"""
if not isinstance(v, int):
raise ValueError(f'v {v} must an integer')
tri_weights = compute_tesselation_weights(v)
verts = []
for base_face in base_faces:
new_verts = np.matmul(tri_weights, base_verts[base_face, :])
new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
verts.append(new_verts)
verts = np.concatenate(verts, 0)
sq_dist = compute_sq_dist(verts.T)
assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
unique = np.unique(assignment)
verts = verts[unique, :]
return verts
def generate_basis(
base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
"""Generates a 3D basis by tesselating a geometric polyhedron.
Args:
base_shape: string, the name of the starting polyhedron, must be either
'tetrahedron', 'icosahedron' or 'octahedron'.
angular_tesselation: int, the number of times to tesselate the polyhedron,
must be >= 1 (a value of 1 is a no-op to the polyhedron).
remove_symmetries: bool, if True then remove the symmetric basis columns,
which is usually a good idea because otherwise projections onto the basis
will have redundant negative copies of each other.
eps: float, a small number used to determine symmetries.
Returns:
basis: a matrix with shape [3, n].
"""
if base_shape == 'tetrahedron':
verts = np.array([
(np.sqrt(8 / 9), 0, -1 / 3),
(-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
(-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
(0, 0, 1),
])
faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
elif base_shape == 'icosahedron':
a = (np.sqrt(5) + 1) / 2
verts = np.array([
(-1, 0, a),
(1, 0, a),
(-1, 0, -a),
(1, 0, -a),
(0, a, 1),
(0, a, -1),
(0, -a, 1),
(0, -a, -1),
(a, 1, 0),
(-a, 1, 0),
(a, -1, 0),
(-a, -1, 0),
]) / np.sqrt(a + 2)
faces = np.array([
(0, 4, 1),
(0, 9, 4),
(9, 5, 4),
(4, 5, 8),
(4, 8, 1),
(8, 10, 1),
(8, 3, 10),
(5, 3, 8),
(5, 2, 3),
(2, 7, 3),
(7, 10, 3),
(7, 6, 10),
(7, 11, 6),
(11, 0, 6),
(0, 1, 6),
(6, 1, 10),
(9, 0, 11),
(9, 11, 2),
(9, 2, 5),
(7, 2, 11),
])
elif base_shape == 'octahedron':
verts = np.array(
[(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
)
corners = np.array(list(itertools.product([-1, 1], repeat=3)))
pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
else:
raise ValueError(f'base_shape {base_shape} not supported')
verts = tesselate_geodesic(verts, faces, angular_tesselation)
if remove_symmetries:
# Remove elements of `verts` that are reflections of each other.
match = compute_sq_dist(verts.T, -verts.T) < eps
verts = verts[~np.any(np.triu(match), axis=0), :]
basis = verts[:, ::-1]
return basis
<fim_middle>if mat1 is None:
mat1 = mat0 | if mat1 is None:
mat1 = mat0 | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/linspline.py
def sorted_lookup(x, xp):
"""Lookup `x` at sorted locations `xp`."""
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
functools.partial(jnp.searchsorted, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx0 = jnp.maximum(idx - 1, 0)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
return idx0, idx1
# camp_zipnerf/internal/vis.py
def visualize_cmap(
value,
weight,
colormap,
lo=None,
hi=None,
percentile=99.0,
curve_fn=lambda x: x,
modulus=None,
matte_background=True,
):
"""Visualize a 1D image and a 1D weighting according to some colormap.
Args:
value: A 1D image.
weight: A weight map, in [0, 1].
colormap: A colormap function.
lo: The lower bound to use when rendering, if None then use a percentile.
hi: The upper bound to use when rendering, if None then use a percentile.
percentile: What percentile of the value map to crop to when automatically
generating `lo` and `hi`. Depends on `weight` as well as `value'.
curve_fn: A curve function that gets applied to `value`, `lo`, and `hi`
before the rest of visualization. Good choices: x, 1/(x+eps), log(x+eps).
modulus: If not None, mod the normalized value by `modulus`. Use (0, 1]. If
`modulus` is not None, `lo`, `hi` and `percentile` will have no effect.
matte_background: If True, matte the image over a checkerboard.
Returns:
A colormap rendering.
"""
# Identify the values that bound the middle of `value' according to `weight`.
lo_auto, hi_auto = weighted_percentile(
value, weight, [50 - percentile / 2, 50 + percentile / 2]
)
# If `lo` or `hi` are None, use the automatically-computed bounds above.
eps = jnp.finfo(jnp.float32).eps
lo = lo or (lo_auto - eps)
hi = hi or (hi_auto + eps)
# Curve all values.
value, lo, hi = [curve_fn(x) for x in [value, lo, hi]]
# Wrap the values around if requested.
if modulus:
value = jnp.mod(value, modulus) / modulus
else:
# Otherwise, just scale to [0, 1].
value = jnp.clip((value - jnp.minimum(lo, hi)) / jnp.abs(hi - lo), 0, 1)
value = jnp.nan_to_num(value)
if colormap:
colorized = colormap(value)[Ellipsis, :3]
else:
if value.shape[-1] != 3:
raise ValueError(f'value must have 3 channels but has {value.shape[-1]}')
colorized = value
return matte(colorized, weight) if matte_background else colorized
# camp_zipnerf/internal/camera_delta.py
def _create_points_from_contracted_frustum(
self,
camera: jaxcam.Camera,
rng: chex.PRNGKey,
sample_depth_contracted: bool = True,
) -> jnp.ndarray:
"""Samples points uniformly in the contracted frustum.
We first compute the contracted camera frustum by intersecting camera rays
with the bounding sphere (which has radius 2). This defines a frustum from
the near plane to infinity. We can then apply the inverse of the contraction
to the points to get the metric point samples.
Args:
camera: The camera used to compute the frustum.
rng: A PRNGKey used to sample points.
sample_depth_contracted: If True, sample the depth in the contracted
space. Otherwise, sample linearly in metric space.
Returns:
Points sampled uniformly in the contracted frustum.
"""
if self.precondition_far >= 2.0:
raise ValueError('Far plane must be <2 when using contracted planes.')
rng, key1, key2 = random.split(rng, 3)
pixels = (
random.uniform(key1, (self.precondition_num_points, 2))
* jnp.array([camera.image_size_x - 1, camera.image_size_y - 1])
+ 0.5
)
rays = jaxcam.pixels_to_rays(camera, pixels)
near_points = camera.position + rays * self.precondition_near
far_points = geometry.ray_sphere_intersection(
camera.position, rays, radius=self.precondition_far
)
s_dist = random.uniform(key2, (self.precondition_num_points, 1))
if sample_depth_contracted:
# Lerp between contracted near and far plane.
points = s_dist * far_points + (1 - s_dist) * near_points
points = coord.inv_contract(points)
else:
# Lerp between uncontracted near and far plane.
near_points = coord.inv_contract(near_points)
far_points = coord.inv_contract(far_points)
points = s_dist * far_points + (1 - s_dist) * near_points
return points
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
<fim_suffix>
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lortentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special casess.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1)) | if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1)) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/linspline.py
def compute_integral(t, y):
"""Integrate a linear spline into a piecewise quadratic spline."""
utils.assert_valid_linspline(t, y)
eps = jnp.finfo(jnp.float32).eps ** 2
dt = jnp.diff(t)
a = jnp.diff(y) / jnp.maximum(eps, 2 * dt)
b = y[Ellipsis, :-1]
# The integral has an ambiguous global offset here, which we set to 0.
c1 = 0.5 * jnp.cumsum(dt[Ellipsis, :-1] * (y[Ellipsis, :-2] + y[Ellipsis, 1:-1]), axis=-1)
c = jnp.concatenate([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1)
# This quadratic is parameterized as:
# (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i]
return a, b, c
# camp_zipnerf/internal/linspline.py
def query(tq, t, v):
"""Query linear spline (t, v) at tq."""
utils.assert_valid_linspline(t, v)
interp = functools.partial(jnp.interp, left=0, right=0)
return jnp.vectorize(interp, signature='(n),(m),(m)->(n)')(tq, t, v)
# camp_zipnerf/internal/linspline.py
def integrate(t, w):
"""Integrate (t, w) according to the trapezoid rule."""
utils.assert_valid_linspline(t, w)
return 0.5 * jnp.sum((w[Ellipsis, :-1] + w[Ellipsis, 1:]) * jnp.diff(t), axis=-1)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating step functions (piecewise-constant 1D functions).
We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin
values that *integrate* to <= 1.
"""
from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np
def query(tq, t, y, left=None, right=None):
"""Query step function (t, y) at locations tq. Edges repeat by default."""
utils.assert_valid_stepfun(t, y)
# Query the step function to recover the interval value.
(i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
# Apply boundary conditions.
left = y[Ellipsis, :1] if left is None else left
right = y[Ellipsis, -1:] if right is None else right
yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
return yq
def weight_to_pdf(t, w):
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
utils.assert_valid_stepfun(t, w)
td = jnp.diff(t)
return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))
def pdf_to_weight(t, p):
"""Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
utils.assert_valid_stepfun(t, p)
return p * jnp.diff(t)
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
def invert_cdf(u, t, w_logits):
"""Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
utils.assert_valid_stepfun(t, w_logits)
# Compute the PDF and CDF for each weight vector.
w = jax.nn.softmax(w_logits, axis=-1)
cw = integrate_weights(w)
# Interpolate into the inverse CDF.
t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
return t_new
def sample(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
deterministic_center=False,
eps=jnp.finfo(jnp.float32).eps,
):
"""Piecewise-Constant PDF sampling from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of samples.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
deterministic_center: bool, if False, when `rng` is None return samples that
linspace the entire PDF. If True, skip the front and back of the linspace
so that the centers of each PDF interval are returned.
eps: float, something like numerical epsilon.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
# Draw uniform samples.
if rng is None:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples)
u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps
d = 1 if single_jitter else num_samples
u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
rng, t.shape[:-1] + (d,), maxval=max_jitter
)
return invert_cdf(u, t, w_logits)
def sample_intervals(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
domain=(-jnp.inf, jnp.inf),
):
"""Sample *intervals* (rather than points) from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of intervals to sample.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
domain: (minval, maxval), the range of valid values for `t`.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
if num_samples <= 1:
raise ValueError(f'num_samples must be > 1, is {num_samples}.')
# Sample a set of points from the step function.
centers = sample(
rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
)
# The intervals we return will span the midpoints of each adjacent sample.
mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2
# Each first/last fencepost is the reflection of the first/last midpoint
# around the first/last sampled center.
first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
samples = jnp.concatenate([first, mid, last], axis=-1)
# We clamp to the limits of the input domain, provided by the caller.
samples = jnp.clip(samples, *domain)
return samples
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
# The loss incurred between all pairs of intervals.
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
# The loss incurred within each individual interval with itself.
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
def weighted_percentile(t, w, ps):
"""Compute the weighted percentiles of a step function. w's must sum to 1."""
utils.assert_valid_stepfun(t, w)
cw = integrate_weights(w)
# We want to interpolate into the integrated weights according to `ps`.
wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
jnp.array(ps) / 100, cw, t
)
return wprctile
def resample(t, tp, vp, use_avg=False):
"""Resample a step function defined by (tp, vp) into intervals t.
Notation roughly matches jnp.interp. Resamples by summation by default.
Args:
t: tensor with shape (..., n+1), the endpoints to resample into.
tp: tensor with shape (..., m+1), the endpoints of the step function being
resampled.
vp: tensor with shape (..., m), the values of the step function being
resampled.
use_avg: bool, if False, return the sum of the step function for each
interval in `t`. If True, return the average, weighted by the width of
each interval in `t`.
Returns:
v: tensor with shape (..., n), the values of the resampled step function.
"""
utils.assert_valid_stepfun(tp, vp)
if use_avg:
wp = jnp.diff(tp)
v_numer = resample(t, tp, vp * wp, use_avg=False)
v_denom = resample(t, tp, wp, use_avg=False)
v = math.safe_div(v_numer, v_denom)
return v
acc = jnp.cumsum(vp, axis=-1)
acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
t, tp, acc0
)
v = jnp.diff(acc0_resampled, axis=-1)
return v
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
<fim_suffix>
# Fix negative values to 0, as they should never happen but may due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
<fim_middle>wq = jnp.diff(acc_wq, axis=-1) | wq = jnp.diff(acc_wq, axis=-1) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/quaternion.py
def from_axis_angle(
axis_angle, eps = jnp.finfo(jnp.float32).eps
):
"""Constructs a quaternion for the given axis/angle rotation.
Args:
axis_angle: A 3-vector where the direction is the axis of rotation and the
magnitude is the angle of rotation.
eps: A small number used for numerical stability around zero rotations.
Returns:
A quaternion encoding the same rotation.
"""
theta_squared = jnp.sum(axis_angle**2, axis=-1)
theta = _safe_sqrt(theta_squared)
half_theta = theta / 2.0
k = jnp.sin(half_theta) / theta
# Avoid evaluating sqrt when theta is close to zero.
k = jnp.where(theta_squared > eps**2, k, 0.5)
qw = jnp.where(theta_squared > eps**2, jnp.cos(half_theta), 1.0)
qx = axis_angle[0] * k
qy = axis_angle[1] * k
qz = axis_angle[2] * k
return jnp.squeeze(jnp.array([qx, qy, qz, qw]))
# camp_zipnerf/internal/grid_utils.py
def grid_sizes(self):
"""Returns the grid sizes."""
desired_num_scales = 1 + self.scale_supersample * onp.log2(
self.max_grid_size / self.min_grid_size
)
num_scales = int(onp.round(desired_num_scales))
if onp.abs(desired_num_scales - num_scales) > 1e-4:
raise ValueError(
'grid scale parameters are ('
+ f'min_grid_size={self.min_grid_size}, '
+ f'max_grid_size={self.max_grid_size}, '
+ f'scale_supersample={self.scale_supersample}), '
+ f'which yields a non-integer number of scales {desired_num_scales}.'
)
return onp.round(
onp.geomspace(
self.min_grid_size,
self.max_grid_size,
num_scales,
)
).astype(onp.int32)
# camp_zipnerf/internal/quaternion.py
def exp(q, eps = 1e-8):
"""Computes the quaternion exponential.
References:
https://en.wikipedia.org/wiki/Quaternion#Exponential,_logarithm,_and_power_functions
Args:
q: the quaternion in (x,y,z,w) format or (x,y,z) if is_pure is True.
eps: an epsilon value for numerical stability.
Returns:
The exponential of q.
"""
is_pure = q.shape[-1] == 3
if is_pure:
s = jnp.zeros_like(q[Ellipsis, -1:])
v = q
else:
v = im(q)
s = re(q)
norm_v = linalg.norm(v, axis=-1, keepdims=True)
exp_s = jnp.exp(s)
w = jnp.cos(norm_v)
xyz = jnp.sin(norm_v) * v / jnp.maximum(norm_v, eps * jnp.ones_like(norm_v))
return exp_s * jnp.concatenate((xyz, w), axis=-1)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for constructing geodesic polyhedron, which are used as a basis."""
import itertools
import numpy as np
def compute_sq_dist(mat0, mat1=None):
"""Compute the squared Euclidean distance between all pairs of columns."""
if mat1 is None:
mat1 = mat0
# Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y.
sq_norm0 = np.sum(mat0**2, 0)
sq_norm1 = np.sum(mat1**2, 0)
sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors.
return sq_dist
def compute_tesselation_weights(v):
"""Tesselate the vertices of a triangle by a factor of `v`."""
if v < 1:
raise ValueError(f'v {v} must be >= 1')
int_weights = []
for i in range(v + 1):
for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j)))
int_weights = np.array(int_weights)
weights = int_weights / v # Barycentric weights.
return weights
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
"""Tesselate the vertices of a geodesic polyhedron.
Args:
base_verts: tensor of floats, the vertex coordinates of the geodesic.
base_faces: tensor of ints, the indices of the vertices of base_verts that
constitute eachface of the polyhedra.
v: int, the factor of the tesselation (v==1 is a no-op).
eps: float, a small value used to determine if two vertices are the same.
Returns:
verts: a tensor of floats, the coordinates of the tesselated vertices.
"""
if not isinstance(v, int):
raise ValueError(f'v {v} must an integer')
tri_weights = compute_tesselation_weights(v)
verts = []
for base_face in base_faces:
new_verts = np.matmul(tri_weights, base_verts[base_face, :])
<fim_suffix>
verts.append(new_verts)
verts = np.concatenate(verts, 0)
sq_dist = compute_sq_dist(verts.T)
assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
unique = np.unique(assignment)
verts = verts[unique, :]
return verts
def generate_basis(
base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
"""Generates a 3D basis by tesselating a geometric polyhedron.
Args:
base_shape: string, the name of the starting polyhedron, must be either
'tetrahedron', 'icosahedron' or 'octahedron'.
angular_tesselation: int, the number of times to tesselate the polyhedron,
must be >= 1 (a value of 1 is a no-op to the polyhedron).
remove_symmetries: bool, if True then remove the symmetric basis columns,
which is usually a good idea because otherwise projections onto the basis
will have redundant negative copies of each other.
eps: float, a small number used to determine symmetries.
Returns:
basis: a matrix with shape [3, n].
"""
if base_shape == 'tetrahedron':
verts = np.array([
(np.sqrt(8 / 9), 0, -1 / 3),
(-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
(-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
(0, 0, 1),
])
faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
elif base_shape == 'icosahedron':
a = (np.sqrt(5) + 1) / 2
verts = np.array([
(-1, 0, a),
(1, 0, a),
(-1, 0, -a),
(1, 0, -a),
(0, a, 1),
(0, a, -1),
(0, -a, 1),
(0, -a, -1),
(a, 1, 0),
(-a, 1, 0),
(a, -1, 0),
(-a, -1, 0),
]) / np.sqrt(a + 2)
faces = np.array([
(0, 4, 1),
(0, 9, 4),
(9, 5, 4),
(4, 5, 8),
(4, 8, 1),
(8, 10, 1),
(8, 3, 10),
(5, 3, 8),
(5, 2, 3),
(2, 7, 3),
(7, 10, 3),
(7, 6, 10),
(7, 11, 6),
(11, 0, 6),
(0, 1, 6),
(6, 1, 10),
(9, 0, 11),
(9, 11, 2),
(9, 2, 5),
(7, 2, 11),
])
elif base_shape == 'octahedron':
verts = np.array(
[(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
)
corners = np.array(list(itertools.product([-1, 1], repeat=3)))
pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
else:
raise ValueError(f'base_shape {base_shape} not supported')
verts = tesselate_geodesic(verts, faces, angular_tesselation)
if remove_symmetries:
# Remove elements of `verts` that are reflections of each other.
match = compute_sq_dist(verts.T, -verts.T) < eps
verts = verts[~np.any(np.triu(match), axis=0), :]
basis = verts[:, ::-1]
return basis
<fim_middle>new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True)) | new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True)) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/math.py
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
# camp_zipnerf/internal/linspline.py
def interpolate_integral(tq, t, a, b, c):
"""Interpolate into the piecewise quadratic returned by compute_integral()."""
utils.assert_valid_stepfun(t, a)
utils.assert_valid_stepfun(t, b)
utils.assert_valid_stepfun(t, c)
# Clip to valid inputs (assumes repeating boundaries).
tq = jnp.clip(tq, t[Ellipsis, :1], math.minus_eps(t[Ellipsis, -1:]))
# Lookup the quadratic coefficients corresponding to each input query.
idx0, _ = sorted_lookup(tq, t)
# TODO(barron): It might be faster to stack (a, c, b) during generation and
# do a single gather.
t0 = jnp.take_along_axis(t, idx0, axis=-1)
a0 = jnp.take_along_axis(a, idx0, axis=-1)
b0 = jnp.take_along_axis(b, idx0, axis=-1)
c0 = jnp.take_along_axis(c, idx0, axis=-1)
td = tq - t0
v = a0 * td**2 + b0 * td + c0
return v
# camp_zipnerf/internal/linspline.py
def insert_knot(ti, t, y):
"""Inserts knots ti into the linear spline (t, w). Assumes zero endpoints."""
utils.assert_valid_linspline(t, y)
check_zero_endpoints(y)
# Compute the spline value at the insertion points.
yi = query(ti, t, y)
# Concatenate the insertion points and values onto the end of each spline.
ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape)
yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape)
to = jnp.concatenate([t, ti_ex], axis=-1)
yo = jnp.concatenate([y, yi_ex], axis=-1)
# Sort the spline according to t.
sort_idx = jnp.argsort(to)
to = jnp.take_along_axis(to, sort_idx, axis=-1)
yo = jnp.take_along_axis(yo, sort_idx, axis=-1)
return to, yo
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating step functions (piecewise-constant 1D functions).
We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin
values that *integrate* to <= 1.
"""
from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np
def query(tq, t, y, left=None, right=None):
"""Query step function (t, y) at locations tq. Edges repeat by default."""
utils.assert_valid_stepfun(t, y)
# Query the step function to recover the interval value.
(i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
# Apply boundary conditions.
left = y[Ellipsis, :1] if left is None else left
right = y[Ellipsis, -1:] if right is None else right
yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
return yq
def weight_to_pdf(t, w):
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
utils.assert_valid_stepfun(t, w)
td = jnp.diff(t)
return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))
def pdf_to_weight(t, p):
"""Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
utils.assert_valid_stepfun(t, p)
return p * jnp.diff(t)
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
def invert_cdf(u, t, w_logits):
"""Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
utils.assert_valid_stepfun(t, w_logits)
# Compute the PDF and CDF for each weight vector.
w = jax.nn.softmax(w_logits, axis=-1)
cw = integrate_weights(w)
# Interpolate into the inverse CDF.
t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
<fim_suffix>
def sample(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
deterministic_center=False,
eps=jnp.finfo(jnp.float32).eps,
):
"""Piecewise-Constant PDF sampling from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of samples.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
deterministic_center: bool, if False, when `rng` is None return samples that
linspace the entire PDF. If True, skip the front and back of the linspace
so that the centers of each PDF interval are returned.
eps: float, something like numerical epsilon.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
# Draw uniform samples.
if rng is None:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples)
u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps
d = 1 if single_jitter else num_samples
u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
rng, t.shape[:-1] + (d,), maxval=max_jitter
)
return invert_cdf(u, t, w_logits)
def sample_intervals(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
domain=(-jnp.inf, jnp.inf),
):
"""Sample *intervals* (rather than points) from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of intervals to sample.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
domain: (minval, maxval), the range of valid values for `t`.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
if num_samples <= 1:
raise ValueError(f'num_samples must be > 1, is {num_samples}.')
# Sample a set of points from the step function.
centers = sample(
rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
)
# The intervals we return will span the midpoints of each adjacent sample.
mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2
# Each first/last fencepost is the reflection of the first/last midpoint
# around the first/last sampled center.
first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
samples = jnp.concatenate([first, mid, last], axis=-1)
# We clamp to the limits of the input domain, provided by the caller.
samples = jnp.clip(samples, *domain)
return samples
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
# The loss incurred between all pairs of intervals.
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
# The loss incurred within each individual interval with itself.
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
def weighted_percentile(t, w, ps):
"""Compute the weighted percentiles of a step function. w's must sum to 1."""
utils.assert_valid_stepfun(t, w)
cw = integrate_weights(w)
# We want to interpolate into the integrated weights according to `ps`.
wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
jnp.array(ps) / 100, cw, t
)
return wprctile
def resample(t, tp, vp, use_avg=False):
"""Resample a step function defined by (tp, vp) into intervals t.
Notation roughly matches jnp.interp. Resamples by summation by default.
Args:
t: tensor with shape (..., n+1), the endpoints to resample into.
tp: tensor with shape (..., m+1), the endpoints of the step function being
resampled.
vp: tensor with shape (..., m), the values of the step function being
resampled.
use_avg: bool, if False, return the sum of the step function for each
interval in `t`. If True, return the average, weighted by the width of
each interval in `t`.
Returns:
v: tensor with shape (..., n), the values of the resampled step function.
"""
utils.assert_valid_stepfun(tp, vp)
if use_avg:
wp = jnp.diff(tp)
v_numer = resample(t, tp, vp * wp, use_avg=False)
v_denom = resample(t, tp, wp, use_avg=False)
v = math.safe_div(v_numer, v_denom)
return v
acc = jnp.cumsum(vp, axis=-1)
acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
t, tp, acc0
)
v = jnp.diff(acc0_resampled, axis=-1)
return v
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
wq = jnp.diff(acc_wq, axis=-1)
# Fix negative values to 0, as they should never happen but may due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
<fim_middle>return t_new | return t_new | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/linspline.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
wq = jnp.diff(acc_wq, axis=-1)
# Fix negative values to 0, as they should never happen but may due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
# camp_zipnerf/internal/utils.py
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
# camp_zipnerf/internal/render.py
def compute_alpha_weights_helper(density_delta):
"""Helper function for compute_alpha_weights."""
log_trans = -jnp.concatenate(
[
jnp.zeros_like(density_delta[Ellipsis, :1]),
jnp.cumsum(density_delta[Ellipsis, :-1], axis=-1),
],
axis=-1,
)
alpha = 1 - jnp.exp(-density_delta)
trans = jnp.exp(log_trans)
weights = alpha * trans
return weights
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for linear splines."""
import functools
from internal import math
from internal import utils
import jax
from jax.experimental import checkify
import jax.numpy as jnp
def check_zero_endpoints(y):
checkify.check(jnp.all(y[Ellipsis, 0] == 0), 'Splines must all start with 0.')
checkify.check(jnp.all(y[Ellipsis, -1] == 0), 'Splines must all end with 0.')
def query(tq, t, v):
"""Query linear spline (t, v) at tq."""
utils.assert_valid_linspline(t, v)
interp = functools.partial(jnp.interp, left=0, right=0)
return jnp.vectorize(interp, signature='(n),(m),(m)->(n)')(tq, t, v)
def integrate(t, w):
"""Integrate (t, w) according to the trapezoid rule."""
utils.assert_valid_linspline(t, w)
return 0.5 * jnp.sum((w[Ellipsis, :-1] + w[Ellipsis, 1:]) * jnp.diff(t), axis=-1)
def normalize(t, w, eps=jnp.finfo(jnp.float32).eps ** 2):
"""Make w integrate to 1."""
utils.assert_valid_linspline(t, w)
return w / jnp.maximum(eps, integrate(t, w))[Ellipsis, None]
def insert_knot(ti, t, y):
"""Inserts knots ti into the linear spline (t, w). Assumes zero endpoints."""
utils.assert_valid_linspline(t, y)
check_zero_endpoints(y)
# Compute the spline value at the insertion points.
yi = query(ti, t, y)
# Concatenate the insertion points and values onto the end of each spline.
ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape)
yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape)
to = jnp.concatenate([t, ti_ex], axis=-1)
yo = jnp.concatenate([y, yi_ex], axis=-1)
# Sort the spline according to t.
sort_idx = jnp.argsort(to)
to = jnp.take_along_axis(to, sort_idx, axis=-1)
yo = jnp.take_along_axis(yo, sort_idx, axis=-1)
return to, yo
def clamp(t, y, minval, maxval):
"""Clamp (t, y) to be zero outside of t in [minval, maxval]."""
utils.assert_valid_linspline(t, y)
check_zero_endpoints(y)
# Add in extra points at and immediately above/below the min/max vals.
ti = jnp.concatenate(
[
math.minus_eps(minval),
minval,
maxval,
math.plus_eps(maxval),
],
axis=-1,
)
tc, yo = insert_knot(ti, t, y)
# Zero the spline values outside of [minval, maxval].
yc = jnp.where(tc > maxval, 0, jnp.where(tc < minval, 0, yo))
return tc, yc
def compute_integral(t, y):
"""Integrate a linear spline into a piecewise quadratic spline."""
utils.assert_valid_linspline(t, y)
eps = jnp.finfo(jnp.float32).eps ** 2
dt = jnp.diff(t)
a = jnp.diff(y) / jnp.maximum(eps, 2 * dt)
b = y[Ellipsis, :-1]
# The integral has an ambiguous global offset here, which we set to 0.
c1 = 0.5 * jnp.cumsum(dt[Ellipsis, :-1] * (y[Ellipsis, :-2] + y[Ellipsis, 1:-1]), axis=-1)
<fim_suffix>
# This quadratic is parameterized as:
# (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i]
return a, b, c
def sorted_lookup(x, xp):
"""Lookup `x` at sorted locations `xp`."""
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
functools.partial(jnp.searchsorted, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx0 = jnp.maximum(idx - 1, 0)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
return idx0, idx1
def interpolate_integral(tq, t, a, b, c):
"""Interpolate into the piecewise quadratic returned by compute_integral()."""
utils.assert_valid_stepfun(t, a)
utils.assert_valid_stepfun(t, b)
utils.assert_valid_stepfun(t, c)
# Clip to valid inputs (assumes repeating boundaries).
tq = jnp.clip(tq, t[Ellipsis, :1], math.minus_eps(t[Ellipsis, -1:]))
# Lookup the quadratic coefficients corresponding to each input query.
idx0, _ = sorted_lookup(tq, t)
# TODO(barron): It might be faster to stack (a, c, b) during generation and
# do a single gather.
t0 = jnp.take_along_axis(t, idx0, axis=-1)
a0 = jnp.take_along_axis(a, idx0, axis=-1)
b0 = jnp.take_along_axis(b, idx0, axis=-1)
c0 = jnp.take_along_axis(c, idx0, axis=-1)
td = tq - t0
v = a0 * td**2 + b0 * td + c0
return v
def blur_stepfun(ts, ys, halfwidth):
"""Convolve a step function (ts, ys) with a box filter of size `halfwidth`."""
utils.assert_valid_stepfun(ts, ys)
# Blur each entire step function by a single `halfwidth` value.
# Dilate the t-values by at least numerical epsilon in each direction.
ts_lo = ts - halfwidth
ts_hi = jnp.maximum(math.plus_eps(ts), ts + halfwidth)
# The difference in adjacent `y` values (zero padded) divided by the
# difference in adjacent `t` values.
ys0 = jnp.concatenate(
[jnp.zeros_like(ys[Ellipsis, :1]), ys, jnp.zeros_like(ys[Ellipsis, :1])], axis=-1
)
dy = jnp.diff(ys0) / (ts_hi - ts_lo)
# When decreasing t splat a positive second derivative, and when increasing
# t splat a negative second derivative.
tp = jnp.concatenate([ts_lo, ts_hi], axis=-1)
dyp = jnp.concatenate([dy, -dy], axis=-1)
# Sort the dilated t-values and their accompanying derivative weights.
idx = jnp.argsort(tp, axis=-1)
tp = jnp.take_along_axis(tp, idx, axis=-1)
dyp = jnp.take_along_axis(dyp, idx[Ellipsis, :-2], axis=-1)
# A ramp is the double integral of a delta function, so if we double-
# integrate these derivatives you get the sum of a bunch of trapezoids.
yp = jnp.cumsum(jnp.diff(tp)[Ellipsis, :-1] * jnp.cumsum(dyp, axis=-1), axis=-1)
# Add in the missing first and last endpoint values, which must be zero
# because we assume zero padding on `ys`.
yp = jnp.concatenate(
[jnp.zeros_like(yp[Ellipsis, :1]), yp, jnp.zeros_like(yp[Ellipsis, -1:])], axis=-1
)
return tp, yp
<fim_middle>c = jnp.concatenate([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1) | c = jnp.concatenate([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/coord.py
def expected_sin(mean, var):
"""Compute the mean of sin(x), x ~ N(mean, var)."""
return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value.
# camp_zipnerf/internal/spin_math.py
def matmul(a, b):
"""jnp.matmul defaults to bfloat16 on TPU, but this doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
# camp_zipnerf/internal/camera_utils.py
def rotation_about_axis(degrees, axis=0):
"""Creates rotation matrix about one of the coordinate axes."""
radians = degrees / 180.0 * np.pi
rot2x2 = np.array(
[[np.cos(radians), -np.sin(radians)], [np.sin(radians), np.cos(radians)]]
)
r = np.eye(3)
r[1:3, 1:3] = rot2x2
r = np.roll(np.roll(r, axis, axis=0), axis, axis=1)
p = np.eye(4)
p[:3, :3] = r
return p
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
<fim_suffix>
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lortentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special casess.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>return safe_trig_helper(x, jnp.sin) | return safe_trig_helper(x, jnp.sin) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/coord.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/render.py
def gaussianize_frustum(t0, t1):
"""Convert intervals along a conical frustum into means and variances."""
# A more stable version of Equation 7 from https://arxiv.org/abs/2103.13415.
s = t0 + t1
d = t1 - t0
eps = jnp.finfo(jnp.float32).eps ** 2
ratio = d**2 / jnp.maximum(eps, 3 * s**2 + d**2)
t_mean = s * (1 / 2 + ratio)
t_var = (1 / 12) * d**2 - (1 / 15) * ratio**2 * (12 * s**2 - d**2)
r_var = (1 / 16) * s**2 + d**2 * (5 / 48 - (1 / 15) * ratio)
return t_mean, t_var, r_var
# camp_zipnerf/internal/render.py
def lift_gaussian(d, t_mean, t_var, r_var, diag):
"""Lift a Gaussian defined along a ray to 3D coordinates."""
mean = d[Ellipsis, None, :] * t_mean[Ellipsis, None]
d_mag_sq = jnp.maximum(1e-10, jnp.sum(d**2, axis=-1, keepdims=True))
if diag:
d_outer_diag = d**2
null_outer_diag = 1 - d_outer_diag / d_mag_sq
t_cov_diag = t_var[Ellipsis, None] * d_outer_diag[Ellipsis, None, :]
xy_cov_diag = r_var[Ellipsis, None] * null_outer_diag[Ellipsis, None, :]
cov_diag = t_cov_diag + xy_cov_diag
return mean, cov_diag
else:
d_outer = d[Ellipsis, :, None] * d[Ellipsis, None, :]
eye = jnp.eye(d.shape[-1])
null_outer = eye - d[Ellipsis, :, None] * (d / d_mag_sq)[Ellipsis, None, :]
t_cov = t_var[Ellipsis, None, None] * d_outer[Ellipsis, None, :, :]
xy_cov = r_var[Ellipsis, None, None] * null_outer[Ellipsis, None, :, :]
cov = t_cov + xy_cov
return mean, cov
# camp_zipnerf/internal/rigid_body.py
def rotation_matrix_from_ortho6d(ortho6d):
"""Computes the 3D rotation matrix from the 6D representation.
Zhou et al. have proposed a novel 6D representation for the rotation in
SO(3) which is completely continuous. This is highly benificial and produces
better results than most standard rotation representations for many tasks,
especially when the predicted value is close to the discontinuity of the
utilized rotation represantation. This function converts from the proposed 6
dimensional representation to the classic 3x3 rotation matrix.
See https://arxiv.org/pdf/1812.07035.pdf for more information.
Args:
ortho6d: 6D represantion for the rotation according Zhou et al. of shape
[6].
Returns:
(3, 3) The associated 3x3 rotation matrices.
"""
if ortho6d.ndim != 1 or ortho6d.shape[0] != 6:
raise ValueError('The shape of the input ortho 6D vector needs to be (6).')
a1, a2 = ortho6d[Ellipsis, :3], ortho6d[Ellipsis, 3:]
b1 = spin_math.normalize(a1)
b2 = a2 - jnp.sum(b1 * a2, axis=-1, keepdims=True) * b1
b2 = spin_math.normalize(b2)
b3 = jnp.cross(b1, b2)
return jnp.stack((b1, b2, b3), axis=-2)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
"""Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
# Clamping to 1 produces correct scale inside |x| < 1
x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq
z = scale * x
<fim_suffix>
def inv_contract(z):
"""The inverse of contract()."""
# Clamping to 1 produces correct scale inside |z| < 1
z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq
x = z / inv_scale
return x
def track_linearize(fn, mean, cov):
"""Apply function `fn` to a set of means and covariances, ala a Kalman filter.
We can analytically transform a Gaussian parameterized by `mean` and `cov`
with a function `fn` by linearizing `fn` around `mean`, and taking advantage
of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
cov: a tensor of covariances, where the last two axes are the dimensions.
Returns:
fn_mean: the transformed means.
fn_cov: the transformed covariances.
"""
if (len(mean.shape) + 1) != len(cov.shape):
raise ValueError('cov must be non-diagonal')
fn_mean, lin_fn = jax.linearize(fn, mean)
fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov))
return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
"""Apply function `fn` to a set of means and scales, ala a Kalman filter.
This is the isotropic or scalar equivalent of track_linearize, as we're still
linearizing a function and tracking a Gaussian through it, but the input and
output Gaussians are all isotropic and are only represented with a single
`scale` value (where `scale**2` is the variance of the Gaussian).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
scale: a tensor of scales, with the same shape as means[..., -1].
Returns:
fn_mean: the transformed means.
fn_scale: the transformed scales.
"""
if mean.shape[:-1] != scale.shape:
raise ValueError(
f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
)
d = mean.shape[-1]
fn_mean, lin_fn = jax.linearize(fn, mean)
if scale is not None:
# Compute the Jacobian of fn function at the locations of each mean.
jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
)
# The cube root of the determinant of the Jacobian is the geometric mean
# of the eigenvalues of the Jacobian, which gives us the isotropic scaling
# implied by `fn` at each mean that `scale` should be multiplied by.
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0.
abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
# Special case d == 3 for speed's sake.
fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
else:
fn_scale = None
return fn_mean, fn_scale
def contract3_isoscale(x):
"""A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
if x.shape[-1] != 3:
raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
# Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq:
return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq))
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
"""Construct a bijection between metric distances and normalized distances.
See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
detailed explanation.
Args:
fn: the function to ray distances.
t_near: a tensor of near-plane distances.
t_far: a tensor of far-plane distances.
fn_inv: Optional, if not None then it's used as the inverse of fn().
Returns:
t_to_s: a function that maps distances to normalized distances in [0, 1].
s_to_t: the inverse of t_to_s.
"""
if fn is None:
fn_fwd = lambda x: x
fn_inv = lambda x: x
else:
fn_fwd = fn
if fn_inv is None:
# A simple mapping from some functions to their inverse.
inv_mapping = {
'reciprocal': jnp.reciprocal,
'log': jnp.exp,
'exp': jnp.log,
'sqrt': jnp.square,
'square': jnp.sqrt,
}
fn_inv = inv_mapping[fn.__name__]
fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)]
# Forcibly clip t to the range of valid values, to guard against inf's.
t_clip = lambda t: jnp.clip(t, t_near, t_far)
t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)
s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))
return t_to_s, s_to_t
def expected_sin(mean, var):
"""Compute the mean of sin(x), x ~ N(mean, var)."""
return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value.
def integrated_pos_enc(mean, var, min_deg, max_deg):
"""Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).
Args:
mean: tensor, the mean coordinates to be encoded
var: tensor, the variance of the coordinates to be encoded.
min_deg: int, the min degree of the encoding.
max_deg: int, the max degree of the encoding.
Returns:
encoded: jnp.ndarray, encoded variables.
"""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = mean.shape[:-1] + (-1,)
scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape)
scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape)
return expected_sin(
jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1),
jnp.concatenate([scaled_var] * 2, axis=-1),
)
def lift_and_diagonalize(mean, cov, basis):
"""Project `mean` and `cov` onto basis and diagonalize the projected cov."""
fn_mean = math.matmul(mean, basis)
fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
return fn_mean, fn_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
"""The positional encoding used by the original NeRF paper."""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = x.shape[:-1] + (-1,)
scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c).
scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c).
# Note that we're not using safe_sin, unlike IPE.
# (..., s*c + s*c).
four_feat = jnp.sin(
jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1)
)
if append_identity:
return jnp.concatenate([x, four_feat], axis=-1)
else:
return four_feat
def sqrtm(mat, return_eigs=False):
"""Take the matrix square root of a PSD matrix [..., d, d]."""
eigvec, eigval = jax.lax.linalg.eigh(
mat, symmetrize_input=False, sort_eigenvalues=False
)
scaling = math.safe_sqrt(eigval)[Ellipsis, None, :]
sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat
def isotropize(cov, mode='accurate'):
"""Turn covariances into isotropic covariances with the same determinant."""
d = cov.shape[-1]
if d == 1:
return cov
if mode == 'fast':
det = jnp.linalg.det(cov)
diag_val = det ** (1 / d)
is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
elif mode == 'accurate':
log_det = jnp.linalg.slogdet(cov)[1]
diag_val = jnp.exp(log_det / d)
is_invalid = ~jnp.isfinite(log_det)
else:
raise ValueError(f'mode={mode} not implemented.')
cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
# Guard against NaN outputs when `det` is super small. Note that this does not
# guard against NaN gradients!
cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
return cov_iso
def construct_perp_basis(directions):
"""Construct a perpendicular basis for each 3-vector in `directions`."""
if directions.shape[-1] != 3:
raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
# To generate a vector perpendicular to `directions`, we take a cross-product
# with an arbitrary vector [0, 0, 1].
cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
# In the rare case that `directions` is very close to [0, 0, 1], we compute an
# alternate cross-product with [1, 1, 1] to use instead.
cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1)
cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a)
# Crossing `directions` with `cross1` gives us our 3rd vector.
cross2 = jnp.cross(directions, cross1)
# Normalize vectors before returning them.
normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))
return normalize(cross1), normalize(cross2)
def hexify(rng, *, origins, directions, radii, tdist):
"""Produce hexagon-shaped samples from ray segments."""
# Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
# This is one of two orderings of angles that doesn't induce any anisotropy
# into the sample covariance of the multisample coordinates. Any rotation and
# mirroring along the z-axis of this ordering is also valid.
# There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
# This seems to work less well though likely because of the strong correlation
# between adjacent angles.
thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
# Lift the angles to the size of the rays.
sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
thetas = jnp.broadcast_to(thetas, sz)
if rng is not None:
# Randomly reverse the order of half of the hexes.
key, rng = random.split(rng)
flip = random.bernoulli(key, shape=sz[:-1])
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
# Rotate each hex by some random amount.
key, rng = random.split(rng)
thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
else:
# If we're deterministic, flip and shift every other hex by 30 degrees.
flip = jnp.arange(thetas.shape[-2]) % 2
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
thetas += (flip * jnp.pi / 6)[Ellipsis, None]
# TODO(barron): Plumb through the dx/dy frame for the original ray in the
# image plane, to avoid the need of this.
perp_axis1, perp_axis2 = construct_perp_basis(directions)
# Grab each t-interval's midpoint and half-width.
t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
s = (t0 + t1) / 2
d = (t1 - t0) / 2
# Compute the length along the ray for each multisample, using mip-NeRF math.
cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
(t1**2 + 2 * s**2)[Ellipsis, None]
+ (3 / np.sqrt(7))
* (np.arange(6) * (2 / 5) - 1)
* math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
)
# Compute the offset from the ray for each multisample.
perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
# Go from ray coordinate to world coordinates.
cx = perp_mag * jnp.cos(thetas)
cy = perp_mag * jnp.sin(thetas)
control = (
origins[Ellipsis, None, None, :]
+ perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
+ perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
+ directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
)
return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
"""Construct "sigma points" along `axis` from each mean and covariance."""
d = cov.shape[-1]
mean_ex = jnp.expand_dims(mean, axis)
if basis == 'mean':
# This effectively disables the unscented transform.
return mean_ex
if basis.startswith('random_'):
num_random = int(basis.split('_')[-1])
# TODO(barron): use a non-fixed random seed?
noise = random.multivariate_normal(
random.PRNGKey(0),
jnp.zeros_like(mean),
cov,
(num_random,) + mean.shape[:-1],
)
control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
return control
sqrtm_cov = sqrtm(cov)
if any([
basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
]):
# Use tessellated regular polyhedra vertices (and vec(0)) as control points.
if d != 3:
raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
base_shape, angular_tesselation = basis.split('_')
transform = geopoly.generate_basis(
base_shape, int(angular_tesselation), remove_symmetries=False
).T
transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
elif basis == 'julier':
# The most basic symmetric unscented transformation from the original paper,
# which yields 2*d+1 control points.
offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
control = jnp.concatenate(
[mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
)
elif basis == 'menegaz':
# A compact unscented transformation from
# folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
# which yields d+1 control points.
if d == 3:
# A hand-optimized version of the d==3 case.
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
offsets = jnp.concatenate(
[-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
)
control = mean_ex + jnp.moveaxis(offsets, -1, axis)
else:
transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
# == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
else:
raise ValueError(f'basis={basis} not implemented.')
return control
def compute_control_points(
means,
covs,
rays,
tdist,
rng,
unscented_mip_basis,
unscented_scale_mult,
):
"""Wrapper to compute unscented control points for the MLP class."""
if unscented_mip_basis == 'hexify':
control, perp_mag = hexify(
rng,
origins=rays.origins,
directions=rays.directions,
radii=rays.radii,
tdist=tdist,
)
else:
# Use a normal unscented transformation.
control = unscented_transform(
means,
covs,
basis=unscented_mip_basis,
axis=-2,
)
if unscented_scale_mult > 0:
if rays is None:
raise SyntaxError(
'Rays are required as input if unscented_scale_mult > 0.'
)
# Mimic the math used by hexify to produce comparable scales.
t_recon = jnp.sum(
(control - rays.origins[Ellipsis, None, None, :])
* rays.directions[Ellipsis, None, None, :],
axis=-1,
)
perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
else:
perp_mag = None
return control, perp_mag
<fim_middle>return z | return z | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/linspline.py
def sorted_lookup(x, xp):
"""Lookup `x` at sorted locations `xp`."""
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
functools.partial(jnp.searchsorted, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx0 = jnp.maximum(idx - 1, 0)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
return idx0, idx1
# camp_zipnerf/internal/vis.py
def visualize_cmap(
value,
weight,
colormap,
lo=None,
hi=None,
percentile=99.0,
curve_fn=lambda x: x,
modulus=None,
matte_background=True,
):
"""Visualize a 1D image and a 1D weighting according to some colormap.
Args:
value: A 1D image.
weight: A weight map, in [0, 1].
colormap: A colormap function.
lo: The lower bound to use when rendering, if None then use a percentile.
hi: The upper bound to use when rendering, if None then use a percentile.
percentile: What percentile of the value map to crop to when automatically
generating `lo` and `hi`. Depends on `weight` as well as `value'.
curve_fn: A curve function that gets applied to `value`, `lo`, and `hi`
before the rest of visualization. Good choices: x, 1/(x+eps), log(x+eps).
modulus: If not None, mod the normalized value by `modulus`. Use (0, 1]. If
`modulus` is not None, `lo`, `hi` and `percentile` will have no effect.
matte_background: If True, matte the image over a checkerboard.
Returns:
A colormap rendering.
"""
# Identify the values that bound the middle of `value' according to `weight`.
lo_auto, hi_auto = weighted_percentile(
value, weight, [50 - percentile / 2, 50 + percentile / 2]
)
# If `lo` or `hi` are None, use the automatically-computed bounds above.
eps = jnp.finfo(jnp.float32).eps
lo = lo or (lo_auto - eps)
hi = hi or (hi_auto + eps)
# Curve all values.
value, lo, hi = [curve_fn(x) for x in [value, lo, hi]]
# Wrap the values around if requested.
if modulus:
value = jnp.mod(value, modulus) / modulus
else:
# Otherwise, just scale to [0, 1].
value = jnp.clip((value - jnp.minimum(lo, hi)) / jnp.abs(hi - lo), 0, 1)
value = jnp.nan_to_num(value)
if colormap:
colorized = colormap(value)[Ellipsis, :3]
else:
if value.shape[-1] != 3:
raise ValueError(f'value must have 3 channels but has {value.shape[-1]}')
colorized = value
return matte(colorized, weight) if matte_background else colorized
# camp_zipnerf/internal/camera_delta.py
def _create_points_from_contracted_frustum(
self,
camera: jaxcam.Camera,
rng: chex.PRNGKey,
sample_depth_contracted: bool = True,
) -> jnp.ndarray:
"""Samples points uniformly in the contracted frustum.
We first compute the contracted camera frustum by intersecting camera rays
with the bounding sphere (which has radius 2). This defines a frustum from
the near plane to infinity. We can then apply the inverse of the contraction
to the points to get the metric point samples.
Args:
camera: The camera used to compute the frustum.
rng: A PRNGKey used to sample points.
sample_depth_contracted: If True, sample the depth in the contracted
space. Otherwise, sample linearly in metric space.
Returns:
Points sampled uniformly in the contracted frustum.
"""
if self.precondition_far >= 2.0:
raise ValueError('Far plane must be <2 when using contracted planes.')
rng, key1, key2 = random.split(rng, 3)
pixels = (
random.uniform(key1, (self.precondition_num_points, 2))
* jnp.array([camera.image_size_x - 1, camera.image_size_y - 1])
+ 0.5
)
rays = jaxcam.pixels_to_rays(camera, pixels)
near_points = camera.position + rays * self.precondition_near
far_points = geometry.ray_sphere_intersection(
camera.position, rays, radius=self.precondition_far
)
s_dist = random.uniform(key2, (self.precondition_num_points, 1))
if sample_depth_contracted:
# Lerp between contracted near and far plane.
points = s_dist * far_points + (1 - s_dist) * near_points
points = coord.inv_contract(points)
else:
# Lerp between uncontracted near and far plane.
near_points = coord.inv_contract(near_points)
far_points = coord.inv_contract(far_points)
points = s_dist * far_points + (1 - s_dist) * near_points
return points
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
<fim_suffix>
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lortentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special casess.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>vals = [] | vals = [] | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/geometry.py
def spherical_to_cartesian(
r,
theta,
phi,
):
"""Converts spherical to cartesian coordinates.
For more details see cartesian_to_spherical below.
Args:
r: (..., 1) Radius of spherical coordinate.
theta: (..., 1) Elevation of spherical coordinate.
phi: (..., 1) Azimuth of spherical coordinate.
Returns:
Cartesian coordinates of shape (..., 3) defined by x, y, z.
"""
x = r * jnp.sin(theta) * jnp.cos(phi)
y = r * jnp.sin(theta) * jnp.sin(phi)
z = r * jnp.cos(theta)
return jnp.stack([x, y, z], axis=-1)
# camp_zipnerf/internal/geometry.py
def compute_bbox_from_xyza(
xyza,
padding,
alpha_threshold = 0.99,
):
"""Computes a bounding box given an xyza array.
Args:
xyza: An array of shape (..., 4) containing the XYZ coordinates in the first
three channels and an alpha value in the last.
padding: A padding value to be added to all sides.
alpha_threshold: The threshold at which to binarize the alpha into a mask.
Returns:
A bounding box of shape (2, 3) containing (min_coords, max_coords).
"""
padding = onp.array(padding)
xyz = xyza[Ellipsis, :3]
alpha = xyza[Ellipsis, 3]
mask = alpha > alpha_threshold
xyz = xyz[mask]
xyz = xyz.reshape(-1, 3)
min_coord = xyz.min(axis=0) - padding
max_coord = xyz.max(axis=0) + padding
return onp.stack([min_coord, max_coord], axis=0)
# camp_zipnerf/internal/spin_math.py
def apply_homogeneous_transform(transform,
vectors):
"""Apply a homogeneous transformation to a collection of vectors.
Args:
transform: (C+1,C+1) A homogeneous transformation matrix.
vectors: (*,C) An array containing 3D points.
Returns:
(*,C) The points transformed by the array.
"""
vectors_h = to_homogeneous(vectors.reshape((-1, vectors.shape[-1])))
transformed = from_homogeneous(matmul(transform, vectors_h.T).T)
return transformed.reshape(vectors.shape)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
import math
from internal import math as math_lib
import jax.numpy as jnp
import numpy as np
def reflect(viewdirs, normals):
"""Reflect view directions about normals.
The reflection of a vector v about a unit vector n is a vector u such that
dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
equations is u = 2 dot(n, v) n - v.
Args:
viewdirs: [..., 3] array of view directions.
normals: [..., 3] array of normal directions (assumed to be unit vectors).
Returns:
[..., 3] array of reflection directions.
"""
return (
2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
- viewdirs
)
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
"""Normalize x to unit length along last axis.
Normalizing vectors is surprisingly tricky, because you have to address the
case where the denominator in the normalization is tiny or zero, in which case
gradients will explode. For this reason, we perform two normalizations: in the
forward pass, we clamp the denominator with ~1e-40, but in the backward pass
we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the
output of this function is unit norm (unless x is very very small) while
preventing exploding gradients.
Args:
x: The array of values to normalize.
grad_eps: The value to clip the squared norm by before division in the
backward pass.
Returns:
A normalized array x / ||x||, normalized along the last axis.
"""
tiny = jnp.finfo(jnp.float32).tiny
grad_eps = jnp.maximum(tiny, grad_eps)
denom_sq = jnp.sum(x**2, axis=-1, keepdims=True)
normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq))
normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq))
# Use `normal_val` in the forward pass but `normal_grad` in the backward pass.
normal = math_lib.override_gradient(normal_val, normal_grad)
return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal)
def compute_weighted_mae(weights, normals, normals_gt):
"""Compute weighted mean angular error, assuming normals are unit length."""
angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1))
return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum())
def generalized_binomial_coeff(a, k):
"""Compute generalized binomial coefficients."""
return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
"""Compute associated Legendre polynomial coefficients.
Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
(l, m)th associated Legendre polynomial, P_l^m(cos(theta)).
Args:
l: associated Legendre polynomial degree.
m: associated Legendre polynomial order.
k: power of cos(theta).
Returns:
A float, the coefficient of the term corresponding to the inputs.
"""
return (
(-1) ** m
* 2**l
* math.factorial(l)
/ math.factorial(k)
/ math.factorial(l - k - m)
* generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l)
)
def sph_harm_coeff(l, m, k):
"""Compute spherical harmonic coefficients."""
return np.sqrt(
(2.0 * l + 1.0)
* math.factorial(l - m)
/ (4.0 * np.pi * math.factorial(l + m))
) * assoc_legendre_coeff(l, m, k)
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
def generate_ide_fn(deg_view):
"""Generate integrated directional encoding (IDE) function.
This function returns a function that computes the integrated directional
encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating integrated directional encoding.
Raises:
ValueError: if deg_view is larger than 5.
"""
if deg_view > 5:
raise ValueError('Only deg_view of at most 5 is numerically stable.')
ml_array = get_ml_array(deg_view)
l_max = 2 ** (deg_view - 1)
# Create a matrix corresponding to ml_array holding all coefficients, which,
# when multiplied (from the right) by the z coordinate Vandermonde matrix,
# results in the z component of the encoding.
mat = np.zeros((l_max + 1, ml_array.shape[1]))
for i, (m, l) in enumerate(ml_array.T):
for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k)
def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[Ellipsis, 0:1]
y = xyz[Ellipsis, 1:2]
z = xyz[Ellipsis, 2:3]
# Compute z Vandermonde matrix.
vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1)
# Compute x+iy Vandermonde matrix.
vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)
# Get spherical harmonics.
sph_harms = vmxy * math_lib.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
# concentration parameter, kappa.
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
<fim_suffix>
# Split into real and imaginary parts and return
return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1)
return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
"""Generate directional encoding (DE) function.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating directional encoding.
"""
integrated_dir_enc_fn = generate_ide_fn(deg_view)
def dir_enc_fn(xyz):
"""Function returning directional encoding (DE)."""
return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1]))
return dir_enc_fn
def orientation_loss(w, n, v):
"""Orientation loss on weights `w`, normals `n`, and -view directions `v`."""
n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1)
return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1))
<fim_middle>ide = sph_harms * jnp.exp(-sigma * kappa_inv) | ide = sph_harms * jnp.exp(-sigma * kappa_inv) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/geometry.py
def spherical_to_cartesian(
r,
theta,
phi,
):
"""Converts spherical to cartesian coordinates.
For more details see cartesian_to_spherical below.
Args:
r: (..., 1) Radius of spherical coordinate.
theta: (..., 1) Elevation of spherical coordinate.
phi: (..., 1) Azimuth of spherical coordinate.
Returns:
Cartesian coordinates of shape (..., 3) defined by x, y, z.
"""
x = r * jnp.sin(theta) * jnp.cos(phi)
y = r * jnp.sin(theta) * jnp.sin(phi)
z = r * jnp.cos(theta)
return jnp.stack([x, y, z], axis=-1)
# camp_zipnerf/internal/geometry.py
def compute_bbox_from_xyza(
xyza,
padding,
alpha_threshold = 0.99,
):
"""Computes a bounding box given an xyza array.
Args:
xyza: An array of shape (..., 4) containing the XYZ coordinates in the first
three channels and an alpha value in the last.
padding: A padding value to be added to all sides.
alpha_threshold: The threshold at which to binarize the alpha into a mask.
Returns:
A bounding box of shape (2, 3) containing (min_coords, max_coords).
"""
padding = onp.array(padding)
xyz = xyza[Ellipsis, :3]
alpha = xyza[Ellipsis, 3]
mask = alpha > alpha_threshold
xyz = xyz[mask]
xyz = xyz.reshape(-1, 3)
min_coord = xyz.min(axis=0) - padding
max_coord = xyz.max(axis=0) + padding
return onp.stack([min_coord, max_coord], axis=0)
# camp_zipnerf/internal/spin_math.py
def apply_homogeneous_transform(transform,
vectors):
"""Apply a homogeneous transformation to a collection of vectors.
Args:
transform: (C+1,C+1) A homogeneous transformation matrix.
vectors: (*,C) An array containing 3D points.
Returns:
(*,C) The points transformed by the array.
"""
vectors_h = to_homogeneous(vectors.reshape((-1, vectors.shape[-1])))
transformed = from_homogeneous(matmul(transform, vectors_h.T).T)
return transformed.reshape(vectors.shape)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
import math
from internal import math as math_lib
import jax.numpy as jnp
import numpy as np
def reflect(viewdirs, normals):
"""Reflect view directions about normals.
The reflection of a vector v about a unit vector n is a vector u such that
dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
equations is u = 2 dot(n, v) n - v.
Args:
viewdirs: [..., 3] array of view directions.
normals: [..., 3] array of normal directions (assumed to be unit vectors).
Returns:
[..., 3] array of reflection directions.
"""
return (
2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
- viewdirs
)
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
"""Normalize x to unit length along last axis.
Normalizing vectors is surprisingly tricky, because you have to address the
case where the denominator in the normalization is tiny or zero, in which case
gradients will explode. For this reason, we perform two normalizations: in the
forward pass, we clamp the denominator with ~1e-40, but in the backward pass
we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the
output of this function is unit norm (unless x is very very small) while
preventing exploding gradients.
Args:
x: The array of values to normalize.
grad_eps: The value to clip the squared norm by before division in the
backward pass.
Returns:
A normalized array x / ||x||, normalized along the last axis.
"""
tiny = jnp.finfo(jnp.float32).tiny
grad_eps = jnp.maximum(tiny, grad_eps)
denom_sq = jnp.sum(x**2, axis=-1, keepdims=True)
normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq))
normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq))
# Use `normal_val` in the forward pass but `normal_grad` in the backward pass.
normal = math_lib.override_gradient(normal_val, normal_grad)
return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal)
def compute_weighted_mae(weights, normals, normals_gt):
"""Compute weighted mean angular error, assuming normals are unit length."""
angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1))
return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum())
def generalized_binomial_coeff(a, k):
"""Compute generalized binomial coefficients."""
return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
"""Compute associated Legendre polynomial coefficients.
Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
(l, m)th associated Legendre polynomial, P_l^m(cos(theta)).
Args:
l: associated Legendre polynomial degree.
m: associated Legendre polynomial order.
k: power of cos(theta).
Returns:
A float, the coefficient of the term corresponding to the inputs.
"""
return (
(-1) ** m
* 2**l
* math.factorial(l)
/ math.factorial(k)
/ math.factorial(l - k - m)
* generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l)
)
def sph_harm_coeff(l, m, k):
"""Compute spherical harmonic coefficients."""
return np.sqrt(
(2.0 * l + 1.0)
* math.factorial(l - m)
/ (4.0 * np.pi * math.factorial(l + m))
) * assoc_legendre_coeff(l, m, k)
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
def generate_ide_fn(deg_view):
"""Generate integrated directional encoding (IDE) function.
This function returns a function that computes the integrated directional
encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating integrated directional encoding.
Raises:
ValueError: if deg_view is larger than 5.
"""
if deg_view > 5:
raise ValueError('Only deg_view of at most 5 is numerically stable.')
ml_array = get_ml_array(deg_view)
l_max = 2 ** (deg_view - 1)
# Create a matrix corresponding to ml_array holding all coefficients, which,
# when multiplied (from the right) by the z coordinate Vandermonde matrix,
# results in the z component of the encoding.
mat = np.zeros((l_max + 1, ml_array.shape[1]))
for i, (m, l) in enumerate(ml_array.T):
for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k)
def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[Ellipsis, 0:1]
y = xyz[Ellipsis, 1:2]
z = xyz[Ellipsis, 2:3]
# Compute z Vandermonde matrix.
<fim_suffix>
# Compute x+iy Vandermonde matrix.
vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)
# Get spherical harmonics.
sph_harms = vmxy * math_lib.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
# concentration parameter, kappa.
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
ide = sph_harms * jnp.exp(-sigma * kappa_inv)
# Split into real and imaginary parts and return
return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1)
return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
"""Generate directional encoding (DE) function.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating directional encoding.
"""
integrated_dir_enc_fn = generate_ide_fn(deg_view)
def dir_enc_fn(xyz):
"""Function returning directional encoding (DE)."""
return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1]))
return dir_enc_fn
def orientation_loss(w, n, v):
"""Orientation loss on weights `w`, normals `n`, and -view directions `v`."""
n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1)
return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1))
<fim_middle>vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) | vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/coord.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/render.py
def lift_gaussian(d, t_mean, t_var, r_var, diag):
"""Lift a Gaussian defined along a ray to 3D coordinates."""
mean = d[Ellipsis, None, :] * t_mean[Ellipsis, None]
d_mag_sq = jnp.maximum(1e-10, jnp.sum(d**2, axis=-1, keepdims=True))
if diag:
d_outer_diag = d**2
null_outer_diag = 1 - d_outer_diag / d_mag_sq
t_cov_diag = t_var[Ellipsis, None] * d_outer_diag[Ellipsis, None, :]
xy_cov_diag = r_var[Ellipsis, None] * null_outer_diag[Ellipsis, None, :]
cov_diag = t_cov_diag + xy_cov_diag
return mean, cov_diag
else:
d_outer = d[Ellipsis, :, None] * d[Ellipsis, None, :]
eye = jnp.eye(d.shape[-1])
null_outer = eye - d[Ellipsis, :, None] * (d / d_mag_sq)[Ellipsis, None, :]
t_cov = t_var[Ellipsis, None, None] * d_outer[Ellipsis, None, :, :]
xy_cov = r_var[Ellipsis, None, None] * null_outer[Ellipsis, None, :, :]
cov = t_cov + xy_cov
return mean, cov
# camp_zipnerf/internal/loss_utils.py
def eikonal_equation(n, eps=jnp.finfo(jnp.float32).tiny):
"""Compute eikonal equation on normals, checking how close norm is to 1."""
norm = jnp.sqrt(jnp.maximum(jnp.sum(n**2, axis=-1), eps))
return jnp.mean((norm - 1.0) ** 2.0)
# camp_zipnerf/internal/utils.py
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
"""Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
# Clamping to 1 produces correct scale inside |x| < 1
x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq
z = scale * x
return z
def inv_contract(z):
"""The inverse of contract()."""
# Clamping to 1 produces correct scale inside |z| < 1
<fim_suffix>
inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq
x = z / inv_scale
return x
def track_linearize(fn, mean, cov):
"""Apply function `fn` to a set of means and covariances, ala a Kalman filter.
We can analytically transform a Gaussian parameterized by `mean` and `cov`
with a function `fn` by linearizing `fn` around `mean`, and taking advantage
of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
cov: a tensor of covariances, where the last two axes are the dimensions.
Returns:
fn_mean: the transformed means.
fn_cov: the transformed covariances.
"""
if (len(mean.shape) + 1) != len(cov.shape):
raise ValueError('cov must be non-diagonal')
fn_mean, lin_fn = jax.linearize(fn, mean)
fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov))
return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
"""Apply function `fn` to a set of means and scales, ala a Kalman filter.
This is the isotropic or scalar equivalent of track_linearize, as we're still
linearizing a function and tracking a Gaussian through it, but the input and
output Gaussians are all isotropic and are only represented with a single
`scale` value (where `scale**2` is the variance of the Gaussian).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
scale: a tensor of scales, with the same shape as means[..., -1].
Returns:
fn_mean: the transformed means.
fn_scale: the transformed scales.
"""
if mean.shape[:-1] != scale.shape:
raise ValueError(
f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
)
d = mean.shape[-1]
fn_mean, lin_fn = jax.linearize(fn, mean)
if scale is not None:
# Compute the Jacobian of fn function at the locations of each mean.
jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
)
# The cube root of the determinant of the Jacobian is the geometric mean
# of the eigenvalues of the Jacobian, which gives us the isotropic scaling
# implied by `fn` at each mean that `scale` should be multiplied by.
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0.
abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
# Special case d == 3 for speed's sake.
fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
else:
fn_scale = None
return fn_mean, fn_scale
def contract3_isoscale(x):
"""A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
if x.shape[-1] != 3:
raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
# Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq:
return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq))
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
"""Construct a bijection between metric distances and normalized distances.
See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
detailed explanation.
Args:
fn: the function to ray distances.
t_near: a tensor of near-plane distances.
t_far: a tensor of far-plane distances.
fn_inv: Optional, if not None then it's used as the inverse of fn().
Returns:
t_to_s: a function that maps distances to normalized distances in [0, 1].
s_to_t: the inverse of t_to_s.
"""
if fn is None:
fn_fwd = lambda x: x
fn_inv = lambda x: x
else:
fn_fwd = fn
if fn_inv is None:
# A simple mapping from some functions to their inverse.
inv_mapping = {
'reciprocal': jnp.reciprocal,
'log': jnp.exp,
'exp': jnp.log,
'sqrt': jnp.square,
'square': jnp.sqrt,
}
fn_inv = inv_mapping[fn.__name__]
fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)]
# Forcibly clip t to the range of valid values, to guard against inf's.
t_clip = lambda t: jnp.clip(t, t_near, t_far)
t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)
s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))
return t_to_s, s_to_t
def expected_sin(mean, var):
"""Compute the mean of sin(x), x ~ N(mean, var)."""
return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value.
def integrated_pos_enc(mean, var, min_deg, max_deg):
"""Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).
Args:
mean: tensor, the mean coordinates to be encoded
var: tensor, the variance of the coordinates to be encoded.
min_deg: int, the min degree of the encoding.
max_deg: int, the max degree of the encoding.
Returns:
encoded: jnp.ndarray, encoded variables.
"""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = mean.shape[:-1] + (-1,)
scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape)
scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape)
return expected_sin(
jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1),
jnp.concatenate([scaled_var] * 2, axis=-1),
)
def lift_and_diagonalize(mean, cov, basis):
"""Project `mean` and `cov` onto basis and diagonalize the projected cov."""
fn_mean = math.matmul(mean, basis)
fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
return fn_mean, fn_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
"""The positional encoding used by the original NeRF paper."""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = x.shape[:-1] + (-1,)
scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c).
scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c).
# Note that we're not using safe_sin, unlike IPE.
# (..., s*c + s*c).
four_feat = jnp.sin(
jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1)
)
if append_identity:
return jnp.concatenate([x, four_feat], axis=-1)
else:
return four_feat
def sqrtm(mat, return_eigs=False):
"""Take the matrix square root of a PSD matrix [..., d, d]."""
eigvec, eigval = jax.lax.linalg.eigh(
mat, symmetrize_input=False, sort_eigenvalues=False
)
scaling = math.safe_sqrt(eigval)[Ellipsis, None, :]
sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat
def isotropize(cov, mode='accurate'):
"""Turn covariances into isotropic covariances with the same determinant."""
d = cov.shape[-1]
if d == 1:
return cov
if mode == 'fast':
det = jnp.linalg.det(cov)
diag_val = det ** (1 / d)
is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
elif mode == 'accurate':
log_det = jnp.linalg.slogdet(cov)[1]
diag_val = jnp.exp(log_det / d)
is_invalid = ~jnp.isfinite(log_det)
else:
raise ValueError(f'mode={mode} not implemented.')
cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
# Guard against NaN outputs when `det` is super small. Note that this does not
# guard against NaN gradients!
cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
return cov_iso
def construct_perp_basis(directions):
"""Construct a perpendicular basis for each 3-vector in `directions`."""
if directions.shape[-1] != 3:
raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
# To generate a vector perpendicular to `directions`, we take a cross-product
# with an arbitrary vector [0, 0, 1].
cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
# In the rare case that `directions` is very close to [0, 0, 1], we compute an
# alternate cross-product with [1, 1, 1] to use instead.
cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1)
cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a)
# Crossing `directions` with `cross1` gives us our 3rd vector.
cross2 = jnp.cross(directions, cross1)
# Normalize vectors before returning them.
normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))
return normalize(cross1), normalize(cross2)
def hexify(rng, *, origins, directions, radii, tdist):
"""Produce hexagon-shaped samples from ray segments."""
# Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
# This is one of two orderings of angles that doesn't induce any anisotropy
# into the sample covariance of the multisample coordinates. Any rotation and
# mirroring along the z-axis of this ordering is also valid.
# There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
# This seems to work less well though likely because of the strong correlation
# between adjacent angles.
thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
# Lift the angles to the size of the rays.
sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
thetas = jnp.broadcast_to(thetas, sz)
if rng is not None:
# Randomly reverse the order of half of the hexes.
key, rng = random.split(rng)
flip = random.bernoulli(key, shape=sz[:-1])
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
# Rotate each hex by some random amount.
key, rng = random.split(rng)
thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
else:
# If we're deterministic, flip and shift every other hex by 30 degrees.
flip = jnp.arange(thetas.shape[-2]) % 2
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
thetas += (flip * jnp.pi / 6)[Ellipsis, None]
# TODO(barron): Plumb through the dx/dy frame for the original ray in the
# image plane, to avoid the need of this.
perp_axis1, perp_axis2 = construct_perp_basis(directions)
# Grab each t-interval's midpoint and half-width.
t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
s = (t0 + t1) / 2
d = (t1 - t0) / 2
# Compute the length along the ray for each multisample, using mip-NeRF math.
cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
(t1**2 + 2 * s**2)[Ellipsis, None]
+ (3 / np.sqrt(7))
* (np.arange(6) * (2 / 5) - 1)
* math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
)
# Compute the offset from the ray for each multisample.
perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
# Go from ray coordinate to world coordinates.
cx = perp_mag * jnp.cos(thetas)
cy = perp_mag * jnp.sin(thetas)
control = (
origins[Ellipsis, None, None, :]
+ perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
+ perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
+ directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
)
return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
"""Construct "sigma points" along `axis` from each mean and covariance."""
d = cov.shape[-1]
mean_ex = jnp.expand_dims(mean, axis)
if basis == 'mean':
# This effectively disables the unscented transform.
return mean_ex
if basis.startswith('random_'):
num_random = int(basis.split('_')[-1])
# TODO(barron): use a non-fixed random seed?
noise = random.multivariate_normal(
random.PRNGKey(0),
jnp.zeros_like(mean),
cov,
(num_random,) + mean.shape[:-1],
)
control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
return control
sqrtm_cov = sqrtm(cov)
if any([
basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
]):
# Use tessellated regular polyhedra vertices (and vec(0)) as control points.
if d != 3:
raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
base_shape, angular_tesselation = basis.split('_')
transform = geopoly.generate_basis(
base_shape, int(angular_tesselation), remove_symmetries=False
).T
transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
elif basis == 'julier':
# The most basic symmetric unscented transformation from the original paper,
# which yields 2*d+1 control points.
offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
control = jnp.concatenate(
[mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
)
elif basis == 'menegaz':
# A compact unscented transformation from
# folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
# which yields d+1 control points.
if d == 3:
# A hand-optimized version of the d==3 case.
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
offsets = jnp.concatenate(
[-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
)
control = mean_ex + jnp.moveaxis(offsets, -1, axis)
else:
transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
# == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
else:
raise ValueError(f'basis={basis} not implemented.')
return control
def compute_control_points(
means,
covs,
rays,
tdist,
rng,
unscented_mip_basis,
unscented_scale_mult,
):
"""Wrapper to compute unscented control points for the MLP class."""
if unscented_mip_basis == 'hexify':
control, perp_mag = hexify(
rng,
origins=rays.origins,
directions=rays.directions,
radii=rays.radii,
tdist=tdist,
)
else:
# Use a normal unscented transformation.
control = unscented_transform(
means,
covs,
basis=unscented_mip_basis,
axis=-2,
)
if unscented_scale_mult > 0:
if rays is None:
raise SyntaxError(
'Rays are required as input if unscented_scale_mult > 0.'
)
# Mimic the math used by hexify to produce comparable scales.
t_recon = jnp.sum(
(control - rays.origins[Ellipsis, None, None, :])
* rays.directions[Ellipsis, None, None, :],
axis=-1,
)
perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
else:
perp_mag = None
return control, perp_mag
<fim_middle>z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True)) | z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True)) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/datasets.py
def run(self):
while True:
self._queue.put(self._next_fn())
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
<fim_suffix>
thread_fn_future.result()
return result_fn
return decorator
<fim_middle># Thread exception will be raised here | # Thread exception will be raised here | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/utils.py
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
# camp_zipnerf/internal/geopoly.py
def generate_basis(
base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
"""Generates a 3D basis by tesselating a geometric polyhedron.
Args:
base_shape: string, the name of the starting polyhedron, must be either
'tetrahedron', 'icosahedron' or 'octahedron'.
angular_tesselation: int, the number of times to tesselate the polyhedron,
must be >= 1 (a value of 1 is a no-op to the polyhedron).
remove_symmetries: bool, if True then remove the symmetric basis columns,
which is usually a good idea because otherwise projections onto the basis
will have redundant negative copies of each other.
eps: float, a small number used to determine symmetries.
Returns:
basis: a matrix with shape [3, n].
"""
if base_shape == 'tetrahedron':
verts = np.array([
(np.sqrt(8 / 9), 0, -1 / 3),
(-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
(-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
(0, 0, 1),
])
faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
elif base_shape == 'icosahedron':
a = (np.sqrt(5) + 1) / 2
verts = np.array([
(-1, 0, a),
(1, 0, a),
(-1, 0, -a),
(1, 0, -a),
(0, a, 1),
(0, a, -1),
(0, -a, 1),
(0, -a, -1),
(a, 1, 0),
(-a, 1, 0),
(a, -1, 0),
(-a, -1, 0),
]) / np.sqrt(a + 2)
faces = np.array([
(0, 4, 1),
(0, 9, 4),
(9, 5, 4),
(4, 5, 8),
(4, 8, 1),
(8, 10, 1),
(8, 3, 10),
(5, 3, 8),
(5, 2, 3),
(2, 7, 3),
(7, 10, 3),
(7, 6, 10),
(7, 11, 6),
(11, 0, 6),
(0, 1, 6),
(6, 1, 10),
(9, 0, 11),
(9, 11, 2),
(9, 2, 5),
(7, 2, 11),
])
elif base_shape == 'octahedron':
verts = np.array(
[(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
)
corners = np.array(list(itertools.product([-1, 1], repeat=3)))
pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
else:
raise ValueError(f'base_shape {base_shape} not supported')
verts = tesselate_geodesic(verts, faces, angular_tesselation)
if remove_symmetries:
# Remove elements of `verts` that are reflections of each other.
match = compute_sq_dist(verts.T, -verts.T) < eps
verts = verts[~np.any(np.triu(match), axis=0), :]
basis = verts[:, ::-1]
return basis
# camp_zipnerf/internal/datasets.py
def get_train_cameras(
self, config: configs.Config, return_jax_cameras: bool = False
) -> tuple[chex.Array, chex.Array, Any, Any, Any]:
"""Returns cameras to be used for training.
Args:
config: The config to use.
return_jax_cameras: If True, will return JAX camera instances rather than
the camera tuple.
Returns:
A camera tuple consistent with `self.cameras` or a JAX camera instance if
`return_jax_cameras` is True.
"""
if config.use_identity_cameras:
cameras = self._get_identity_cameras()
elif config.use_perturbed_cameras:
cameras = self._get_perturbed_cameras(config)
else:
cameras = self.cameras
pixtocams, poses, distortion_params = cameras[:3]
# Set the distortion params to not be None of we are optimizing for cameras.
if config.optimize_cameras and not distortion_params:
distortion_params = {
'k1': 0.0,
'k2': 0.0,
'k3': 0.0,
}
distortion_params = jax.tree_util.tree_map(
lambda x: np.zeros(self.cameras[0].shape[0]), distortion_params
)
cameras = (pixtocams, poses, distortion_params, *cameras[3:])
if return_jax_cameras:
image_sizes = np.array([(x.shape[1], x.shape[0]) for x in self.images])
return jax.vmap(self.jax_camera_from_tuple_fn)(cameras, image_sizes)
return cameras
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating step functions (piecewise-constant 1D functions).
We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin
values that *integrate* to <= 1.
"""
from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np
def query(tq, t, y, left=None, right=None):
"""Query step function (t, y) at locations tq. Edges repeat by default."""
utils.assert_valid_stepfun(t, y)
# Query the step function to recover the interval value.
(i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
# Apply boundary conditions.
left = y[Ellipsis, :1] if left is None else left
right = y[Ellipsis, -1:] if right is None else right
yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
return yq
def weight_to_pdf(t, w):
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
utils.assert_valid_stepfun(t, w)
td = jnp.diff(t)
return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))
def pdf_to_weight(t, p):
"""Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
utils.assert_valid_stepfun(t, p)
return p * jnp.diff(t)
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
def invert_cdf(u, t, w_logits):
"""Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
utils.assert_valid_stepfun(t, w_logits)
# Compute the PDF and CDF for each weight vector.
w = jax.nn.softmax(w_logits, axis=-1)
cw = integrate_weights(w)
# Interpolate into the inverse CDF.
t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
return t_new
def sample(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
deterministic_center=False,
eps=jnp.finfo(jnp.float32).eps,
):
"""Piecewise-Constant PDF sampling from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of samples.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
deterministic_center: bool, if False, when `rng` is None return samples that
linspace the entire PDF. If True, skip the front and back of the linspace
so that the centers of each PDF interval are returned.
eps: float, something like numerical epsilon.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
# Draw uniform samples.
if rng is None:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples)
u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps
d = 1 if single_jitter else num_samples
u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
rng, t.shape[:-1] + (d,), maxval=max_jitter
)
return invert_cdf(u, t, w_logits)
def sample_intervals(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
domain=(-jnp.inf, jnp.inf),
):
"""Sample *intervals* (rather than points) from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of intervals to sample.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
domain: (minval, maxval), the range of valid values for `t`.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
if num_samples <= 1:
raise ValueError(f'num_samples must be > 1, is {num_samples}.')
<fim_suffix>
centers = sample(
rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
)
# The intervals we return will span the midpoints of each adjacent sample.
mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2
# Each first/last fencepost is the reflection of the first/last midpoint
# around the first/last sampled center.
first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
samples = jnp.concatenate([first, mid, last], axis=-1)
# We clamp to the limits of the input domain, provided by the caller.
samples = jnp.clip(samples, *domain)
return samples
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
# The loss incurred between all pairs of intervals.
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
# The loss incurred within each individual interval with itself.
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
def weighted_percentile(t, w, ps):
"""Compute the weighted percentiles of a step function. w's must sum to 1."""
utils.assert_valid_stepfun(t, w)
cw = integrate_weights(w)
# We want to interpolate into the integrated weights according to `ps`.
wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
jnp.array(ps) / 100, cw, t
)
return wprctile
def resample(t, tp, vp, use_avg=False):
"""Resample a step function defined by (tp, vp) into intervals t.
Notation roughly matches jnp.interp. Resamples by summation by default.
Args:
t: tensor with shape (..., n+1), the endpoints to resample into.
tp: tensor with shape (..., m+1), the endpoints of the step function being
resampled.
vp: tensor with shape (..., m), the values of the step function being
resampled.
use_avg: bool, if False, return the sum of the step function for each
interval in `t`. If True, return the average, weighted by the width of
each interval in `t`.
Returns:
v: tensor with shape (..., n), the values of the resampled step function.
"""
utils.assert_valid_stepfun(tp, vp)
if use_avg:
wp = jnp.diff(tp)
v_numer = resample(t, tp, vp * wp, use_avg=False)
v_denom = resample(t, tp, wp, use_avg=False)
v = math.safe_div(v_numer, v_denom)
return v
acc = jnp.cumsum(vp, axis=-1)
acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
t, tp, acc0
)
v = jnp.diff(acc0_resampled, axis=-1)
return v
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
wq = jnp.diff(acc_wq, axis=-1)
# Fix negative values to 0, as they should never happen but may due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
<fim_middle># Sample a set of points from the step function. | # Sample a set of points from the step function. | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/geometry.py
def spherical_to_cartesian(
r,
theta,
phi,
):
"""Converts spherical to cartesian coordinates.
For more details see cartesian_to_spherical below.
Args:
r: (..., 1) Radius of spherical coordinate.
theta: (..., 1) Elevation of spherical coordinate.
phi: (..., 1) Azimuth of spherical coordinate.
Returns:
Cartesian coordinates of shape (..., 3) defined by x, y, z.
"""
x = r * jnp.sin(theta) * jnp.cos(phi)
y = r * jnp.sin(theta) * jnp.sin(phi)
z = r * jnp.cos(theta)
return jnp.stack([x, y, z], axis=-1)
# camp_zipnerf/internal/geometry.py
def compute_bbox_from_xyza(
xyza,
padding,
alpha_threshold = 0.99,
):
"""Computes a bounding box given an xyza array.
Args:
xyza: An array of shape (..., 4) containing the XYZ coordinates in the first
three channels and an alpha value in the last.
padding: A padding value to be added to all sides.
alpha_threshold: The threshold at which to binarize the alpha into a mask.
Returns:
A bounding box of shape (2, 3) containing (min_coords, max_coords).
"""
padding = onp.array(padding)
xyz = xyza[Ellipsis, :3]
alpha = xyza[Ellipsis, 3]
mask = alpha > alpha_threshold
xyz = xyz[mask]
xyz = xyz.reshape(-1, 3)
min_coord = xyz.min(axis=0) - padding
max_coord = xyz.max(axis=0) + padding
return onp.stack([min_coord, max_coord], axis=0)
# camp_zipnerf/internal/spin_math.py
def apply_homogeneous_transform(transform,
vectors):
"""Apply a homogeneous transformation to a collection of vectors.
Args:
transform: (C+1,C+1) A homogeneous transformation matrix.
vectors: (*,C) An array containing 3D points.
Returns:
(*,C) The points transformed by the array.
"""
vectors_h = to_homogeneous(vectors.reshape((-1, vectors.shape[-1])))
transformed = from_homogeneous(matmul(transform, vectors_h.T).T)
return transformed.reshape(vectors.shape)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
import math
from internal import math as math_lib
import jax.numpy as jnp
import numpy as np
def reflect(viewdirs, normals):
"""Reflect view directions about normals.
The reflection of a vector v about a unit vector n is a vector u such that
dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
equations is u = 2 dot(n, v) n - v.
Args:
viewdirs: [..., 3] array of view directions.
normals: [..., 3] array of normal directions (assumed to be unit vectors).
Returns:
[..., 3] array of reflection directions.
"""
return (
2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
- viewdirs
)
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
"""Normalize x to unit length along last axis.
Normalizing vectors is surprisingly tricky, because you have to address the
case where the denominator in the normalization is tiny or zero, in which case
gradients will explode. For this reason, we perform two normalizations: in the
forward pass, we clamp the denominator with ~1e-40, but in the backward pass
we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the
output of this function is unit norm (unless x is very very small) while
preventing exploding gradients.
Args:
x: The array of values to normalize.
grad_eps: The value to clip the squared norm by before division in the
backward pass.
Returns:
A normalized array x / ||x||, normalized along the last axis.
"""
tiny = jnp.finfo(jnp.float32).tiny
grad_eps = jnp.maximum(tiny, grad_eps)
denom_sq = jnp.sum(x**2, axis=-1, keepdims=True)
normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq))
normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq))
# Use `normal_val` in the forward pass but `normal_grad` in the backward pass.
normal = math_lib.override_gradient(normal_val, normal_grad)
return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal)
def compute_weighted_mae(weights, normals, normals_gt):
"""Compute weighted mean angular error, assuming normals are unit length."""
angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1))
return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum())
def generalized_binomial_coeff(a, k):
"""Compute generalized binomial coefficients."""
return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
"""Compute associated Legendre polynomial coefficients.
Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
(l, m)th associated Legendre polynomial, P_l^m(cos(theta)).
Args:
l: associated Legendre polynomial degree.
m: associated Legendre polynomial order.
k: power of cos(theta).
Returns:
A float, the coefficient of the term corresponding to the inputs.
"""
return (
(-1) ** m
* 2**l
* math.factorial(l)
/ math.factorial(k)
/ math.factorial(l - k - m)
* generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l)
)
def sph_harm_coeff(l, m, k):
"""Compute spherical harmonic coefficients."""
return np.sqrt(
(2.0 * l + 1.0)
* math.factorial(l - m)
/ (4.0 * np.pi * math.factorial(l + m))
) * assoc_legendre_coeff(l, m, k)
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
def generate_ide_fn(deg_view):
"""Generate integrated directional encoding (IDE) function.
This function returns a function that computes the integrated directional
encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating integrated directional encoding.
Raises:
ValueError: if deg_view is larger than 5.
"""
if deg_view > 5:
raise ValueError('Only deg_view of at most 5 is numerically stable.')
ml_array = get_ml_array(deg_view)
l_max = 2 ** (deg_view - 1)
# Create a matrix corresponding to ml_array holding all coefficients, which,
# when multiplied (from the right) by the z coordinate Vandermonde matrix,
# results in the z component of the encoding.
mat = np.zeros((l_max + 1, ml_array.shape[1]))
for i, (m, l) in enumerate(ml_array.T):
for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k)
def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[Ellipsis, 0:1]
y = xyz[Ellipsis, 1:2]
z = xyz[Ellipsis, 2:3]
# Compute z Vandermonde matrix.
vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1)
# Compute x+iy Vandermonde matrix.
vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)
# Get spherical harmonics.
sph_harms = vmxy * math_lib.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
<fim_suffix>
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
ide = sph_harms * jnp.exp(-sigma * kappa_inv)
# Split into real and imaginary parts and return
return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1)
return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
"""Generate directional encoding (DE) function.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating directional encoding.
"""
integrated_dir_enc_fn = generate_ide_fn(deg_view)
def dir_enc_fn(xyz):
"""Function returning directional encoding (DE)."""
return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1]))
return dir_enc_fn
def orientation_loss(w, n, v):
"""Orientation loss on weights `w`, normals `n`, and -view directions `v`."""
n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1)
return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1))
<fim_middle># concentration parameter, kappa. | # concentration parameter, kappa. | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/geopoly.py
def compute_sq_dist(mat0, mat1=None):
"""Compute the squared Euclidean distance between all pairs of columns."""
if mat1 is None:
mat1 = mat0
# Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y.
sq_norm0 = np.sum(mat0**2, 0)
sq_norm1 = np.sum(mat1**2, 0)
sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors.
return sq_dist
# camp_zipnerf/internal/ref_utils.py
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
# camp_zipnerf/internal/linspline.py
def insert_knot(ti, t, y):
"""Inserts knots ti into the linear spline (t, w). Assumes zero endpoints."""
utils.assert_valid_linspline(t, y)
check_zero_endpoints(y)
# Compute the spline value at the insertion points.
yi = query(ti, t, y)
# Concatenate the insertion points and values onto the end of each spline.
ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape)
yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape)
to = jnp.concatenate([t, ti_ex], axis=-1)
yo = jnp.concatenate([y, yi_ex], axis=-1)
# Sort the spline according to t.
sort_idx = jnp.argsort(to)
to = jnp.take_along_axis(to, sort_idx, axis=-1)
yo = jnp.take_along_axis(yo, sort_idx, axis=-1)
return to, yo
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating step functions (piecewise-constant 1D functions).
We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin
values that *integrate* to <= 1.
"""
from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np
def query(tq, t, y, left=None, right=None):
"""Query step function (t, y) at locations tq. Edges repeat by default."""
utils.assert_valid_stepfun(t, y)
# Query the step function to recover the interval value.
(i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
# Apply boundary conditions.
left = y[Ellipsis, :1] if left is None else left
right = y[Ellipsis, -1:] if right is None else right
yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
return yq
def weight_to_pdf(t, w):
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
utils.assert_valid_stepfun(t, w)
td = jnp.diff(t)
return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))
def pdf_to_weight(t, p):
"""Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
utils.assert_valid_stepfun(t, p)
return p * jnp.diff(t)
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
def invert_cdf(u, t, w_logits):
"""Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
utils.assert_valid_stepfun(t, w_logits)
# Compute the PDF and CDF for each weight vector.
w = jax.nn.softmax(w_logits, axis=-1)
cw = integrate_weights(w)
# Interpolate into the inverse CDF.
t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
return t_new
def sample(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
deterministic_center=False,
eps=jnp.finfo(jnp.float32).eps,
):
"""Piecewise-Constant PDF sampling from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of samples.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
deterministic_center: bool, if False, when `rng` is None return samples that
linspace the entire PDF. If True, skip the front and back of the linspace
so that the centers of each PDF interval are returned.
eps: float, something like numerical epsilon.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
# Draw uniform samples.
if rng is None:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples)
u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps
d = 1 if single_jitter else num_samples
u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
rng, t.shape[:-1] + (d,), maxval=max_jitter
)
return invert_cdf(u, t, w_logits)
def sample_intervals(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
domain=(-jnp.inf, jnp.inf),
):
"""Sample *intervals* (rather than points) from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of intervals to sample.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
domain: (minval, maxval), the range of valid values for `t`.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
if num_samples <= 1:
raise ValueError(f'num_samples must be > 1, is {num_samples}.')
# Sample a set of points from the step function.
centers = sample(
rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
)
# The intervals we return will span the midpoints of each adjacent sample.
mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2
# Each first/last fencepost is the reflection of the first/last midpoint
# around the first/last sampled center.
first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
samples = jnp.concatenate([first, mid, last], axis=-1)
# We clamp to the limits of the input domain, provided by the caller.
samples = jnp.clip(samples, *domain)
return samples
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
# The loss incurred between all pairs of intervals.
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
<fim_suffix>
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
def weighted_percentile(t, w, ps):
"""Compute the weighted percentiles of a step function. w's must sum to 1."""
utils.assert_valid_stepfun(t, w)
cw = integrate_weights(w)
# We want to interpolate into the integrated weights according to `ps`.
wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
jnp.array(ps) / 100, cw, t
)
return wprctile
def resample(t, tp, vp, use_avg=False):
"""Resample a step function defined by (tp, vp) into intervals t.
Notation roughly matches jnp.interp. Resamples by summation by default.
Args:
t: tensor with shape (..., n+1), the endpoints to resample into.
tp: tensor with shape (..., m+1), the endpoints of the step function being
resampled.
vp: tensor with shape (..., m), the values of the step function being
resampled.
use_avg: bool, if False, return the sum of the step function for each
interval in `t`. If True, return the average, weighted by the width of
each interval in `t`.
Returns:
v: tensor with shape (..., n), the values of the resampled step function.
"""
utils.assert_valid_stepfun(tp, vp)
if use_avg:
wp = jnp.diff(tp)
v_numer = resample(t, tp, vp * wp, use_avg=False)
v_denom = resample(t, tp, wp, use_avg=False)
v = math.safe_div(v_numer, v_denom)
return v
acc = jnp.cumsum(vp, axis=-1)
acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
t, tp, acc0
)
v = jnp.diff(acc0_resampled, axis=-1)
return v
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
wq = jnp.diff(acc_wq, axis=-1)
# Fix negative values to 0, as they should never happen but may due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
<fim_middle># The loss incurred within each individual interval with itself. | # The loss incurred within each individual interval with itself. | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/linspline.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/rigid_body.py
def exp_so3(
axis_angle, eps=jnp.finfo(jnp.float32).eps
):
"""Exponential map from Lie algebra so3 to Lie group SO3.
Modern Robotics Eqn 3.51, a.k.a. Rodrigues' formula.
Args:
axis_angle: A 3-vector where the direction is the axis of rotation and the
magnitude is the angle of rotation.
eps: an epsilon value for numerical stability.
Returns:
R: (3, 3) An orthonormal rotation matrix representing the same rotation.
"""
theta_squared = jnp.sum(axis_angle**2, axis=-1)
theta = _safe_sqrt(theta_squared)
# Near zero, we switch to using the first order Taylor expansion.
R_taylor = jnp.eye(3) + skew(axis_angle)
# Prevent bad gradients from propagating back when theta is small.
axis_angle_safe = jnp.where(theta_squared > eps**2, axis_angle, 0.0)
theta_safe = jnp.where(theta_squared > eps**2, theta, 1.0)
axis = axis_angle_safe / theta_safe
W = skew(axis)
R = (
jnp.eye(3)
+ jnp.sin(theta_safe) * W
+ (1.0 - jnp.cos(theta_safe)) * spin_math.matmul(W, W)
)
return jnp.where(theta_squared > eps**2, R, R_taylor)
# camp_zipnerf/internal/stepfun.py
def sample(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
deterministic_center=False,
eps=jnp.finfo(jnp.float32).eps,
):
"""Piecewise-Constant PDF sampling from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of samples.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
deterministic_center: bool, if False, when `rng` is None return samples that
linspace the entire PDF. If True, skip the front and back of the linspace
so that the centers of each PDF interval are returned.
eps: float, something like numerical epsilon.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
# Draw uniform samples.
if rng is None:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples)
u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps
d = 1 if single_jitter else num_samples
u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
rng, t.shape[:-1] + (d,), maxval=max_jitter
)
return invert_cdf(u, t, w_logits)
# camp_zipnerf/internal/stepfun.py
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
wq = jnp.diff(acc_wq, axis=-1)
# Fix negative values to 0, as they should never happen but may due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for linear splines."""
import functools
from internal import math
from internal import utils
import jax
from jax.experimental import checkify
import jax.numpy as jnp
def check_zero_endpoints(y):
checkify.check(jnp.all(y[Ellipsis, 0] == 0), 'Splines must all start with 0.')
checkify.check(jnp.all(y[Ellipsis, -1] == 0), 'Splines must all end with 0.')
def query(tq, t, v):
"""Query linear spline (t, v) at tq."""
utils.assert_valid_linspline(t, v)
interp = functools.partial(jnp.interp, left=0, right=0)
return jnp.vectorize(interp, signature='(n),(m),(m)->(n)')(tq, t, v)
def integrate(t, w):
"""Integrate (t, w) according to the trapezoid rule."""
utils.assert_valid_linspline(t, w)
return 0.5 * jnp.sum((w[Ellipsis, :-1] + w[Ellipsis, 1:]) * jnp.diff(t), axis=-1)
def normalize(t, w, eps=jnp.finfo(jnp.float32).eps ** 2):
"""Make w integrate to 1."""
utils.assert_valid_linspline(t, w)
return w / jnp.maximum(eps, integrate(t, w))[Ellipsis, None]
def insert_knot(ti, t, y):
"""Inserts knots ti into the linear spline (t, w). Assumes zero endpoints."""
utils.assert_valid_linspline(t, y)
check_zero_endpoints(y)
# Compute the spline value at the insertion points.
yi = query(ti, t, y)
# Concatenate the insertion points and values onto the end of each spline.
ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape)
yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape)
to = jnp.concatenate([t, ti_ex], axis=-1)
yo = jnp.concatenate([y, yi_ex], axis=-1)
# Sort the spline according to t.
sort_idx = jnp.argsort(to)
to = jnp.take_along_axis(to, sort_idx, axis=-1)
yo = jnp.take_along_axis(yo, sort_idx, axis=-1)
return to, yo
def clamp(t, y, minval, maxval):
"""Clamp (t, y) to be zero outside of t in [minval, maxval]."""
utils.assert_valid_linspline(t, y)
check_zero_endpoints(y)
# Add in extra points at and immediately above/below the min/max vals.
ti = jnp.concatenate(
[
math.minus_eps(minval),
minval,
maxval,
math.plus_eps(maxval),
],
axis=-1,
)
tc, yo = insert_knot(ti, t, y)
# Zero the spline values outside of [minval, maxval].
yc = jnp.where(tc > maxval, 0, jnp.where(tc < minval, 0, yo))
return tc, yc
def compute_integral(t, y):
"""Integrate a linear spline into a piecewise quadratic spline."""
utils.assert_valid_linspline(t, y)
eps = jnp.finfo(jnp.float32).eps ** 2
dt = jnp.diff(t)
a = jnp.diff(y) / jnp.maximum(eps, 2 * dt)
b = y[Ellipsis, :-1]
# The integral has an ambiguous global offset here, which we set to 0.
c1 = 0.5 * jnp.cumsum(dt[Ellipsis, :-1] * (y[Ellipsis, :-2] + y[Ellipsis, 1:-1]), axis=-1)
c = jnp.concatenate([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1)
# This quadratic is parameterized as:
# (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i]
return a, b, c
def sorted_lookup(x, xp):
"""Lookup `x` at sorted locations `xp`."""
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
functools.partial(jnp.searchsorted, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx0 = jnp.maximum(idx - 1, 0)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
return idx0, idx1
def interpolate_integral(tq, t, a, b, c):
"""Interpolate into the piecewise quadratic returned by compute_integral()."""
utils.assert_valid_stepfun(t, a)
utils.assert_valid_stepfun(t, b)
utils.assert_valid_stepfun(t, c)
# Clip to valid inputs (assumes repeating boundaries).
tq = jnp.clip(tq, t[Ellipsis, :1], math.minus_eps(t[Ellipsis, -1:]))
# Lookup the quadratic coefficients corresponding to each input query.
idx0, _ = sorted_lookup(tq, t)
# TODO(barron): It might be faster to stack (a, c, b) during generation and
# do a single gather.
t0 = jnp.take_along_axis(t, idx0, axis=-1)
a0 = jnp.take_along_axis(a, idx0, axis=-1)
b0 = jnp.take_along_axis(b, idx0, axis=-1)
c0 = jnp.take_along_axis(c, idx0, axis=-1)
td = tq - t0
v = a0 * td**2 + b0 * td + c0
return v
def blur_stepfun(ts, ys, halfwidth):
"""Convolve a step function (ts, ys) with a box filter of size `halfwidth`."""
utils.assert_valid_stepfun(ts, ys)
# Blur each entire step function by a single `halfwidth` value.
<fim_suffix>
ts_lo = ts - halfwidth
ts_hi = jnp.maximum(math.plus_eps(ts), ts + halfwidth)
# The difference in adjacent `y` values (zero padded) divided by the
# difference in adjacent `t` values.
ys0 = jnp.concatenate(
[jnp.zeros_like(ys[Ellipsis, :1]), ys, jnp.zeros_like(ys[Ellipsis, :1])], axis=-1
)
dy = jnp.diff(ys0) / (ts_hi - ts_lo)
# When decreasing t splat a positive second derivative, and when increasing
# t splat a negative second derivative.
tp = jnp.concatenate([ts_lo, ts_hi], axis=-1)
dyp = jnp.concatenate([dy, -dy], axis=-1)
# Sort the dilated t-values and their accompanying derivative weights.
idx = jnp.argsort(tp, axis=-1)
tp = jnp.take_along_axis(tp, idx, axis=-1)
dyp = jnp.take_along_axis(dyp, idx[Ellipsis, :-2], axis=-1)
# A ramp is the double integral of a delta function, so if we double-
# integrate these derivatives you get the sum of a bunch of trapezoids.
yp = jnp.cumsum(jnp.diff(tp)[Ellipsis, :-1] * jnp.cumsum(dyp, axis=-1), axis=-1)
# Add in the missing first and last endpoint values, which must be zero
# because we assume zero padding on `ys`.
yp = jnp.concatenate(
[jnp.zeros_like(yp[Ellipsis, :1]), yp, jnp.zeros_like(yp[Ellipsis, -1:])], axis=-1
)
return tp, yp
<fim_middle># Dilate the t-values by at least numerical epsilon in each direction. | # Dilate the t-values by at least numerical epsilon in each direction. | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/math.py
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
# camp_zipnerf/internal/ref_utils.py
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
# camp_zipnerf/internal/coord.py
def contract3_isoscale(x):
"""A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
if x.shape[-1] != 3:
raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
# Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq:
return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq))
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for constructing geodesic polyhedron, which are used as a basis."""
import itertools
import numpy as np
def compute_sq_dist(mat0, mat1=None):
"""Compute the squared Euclidean distance between all pairs of columns."""
if mat1 is None:
mat1 = mat0
# Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y.
sq_norm0 = np.sum(mat0**2, 0)
sq_norm1 = np.sum(mat1**2, 0)
sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors.
return sq_dist
def compute_tesselation_weights(v):
"""Tesselate the vertices of a triangle by a factor of `v`."""
if v < 1:
raise ValueError(f'v {v} must be >= 1')
int_weights = []
for i in range(v + 1):
for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j)))
int_weights = np.array(int_weights)
weights = int_weights / v <fim_suffix>
return weights
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
"""Tesselate the vertices of a geodesic polyhedron.
Args:
base_verts: tensor of floats, the vertex coordinates of the geodesic.
base_faces: tensor of ints, the indices of the vertices of base_verts that
constitute eachface of the polyhedra.
v: int, the factor of the tesselation (v==1 is a no-op).
eps: float, a small value used to determine if two vertices are the same.
Returns:
verts: a tensor of floats, the coordinates of the tesselated vertices.
"""
if not isinstance(v, int):
raise ValueError(f'v {v} must an integer')
tri_weights = compute_tesselation_weights(v)
verts = []
for base_face in base_faces:
new_verts = np.matmul(tri_weights, base_verts[base_face, :])
new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
verts.append(new_verts)
verts = np.concatenate(verts, 0)
sq_dist = compute_sq_dist(verts.T)
assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
unique = np.unique(assignment)
verts = verts[unique, :]
return verts
def generate_basis(
base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
"""Generates a 3D basis by tesselating a geometric polyhedron.
Args:
base_shape: string, the name of the starting polyhedron, must be either
'tetrahedron', 'icosahedron' or 'octahedron'.
angular_tesselation: int, the number of times to tesselate the polyhedron,
must be >= 1 (a value of 1 is a no-op to the polyhedron).
remove_symmetries: bool, if True then remove the symmetric basis columns,
which is usually a good idea because otherwise projections onto the basis
will have redundant negative copies of each other.
eps: float, a small number used to determine symmetries.
Returns:
basis: a matrix with shape [3, n].
"""
if base_shape == 'tetrahedron':
verts = np.array([
(np.sqrt(8 / 9), 0, -1 / 3),
(-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
(-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
(0, 0, 1),
])
faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
elif base_shape == 'icosahedron':
a = (np.sqrt(5) + 1) / 2
verts = np.array([
(-1, 0, a),
(1, 0, a),
(-1, 0, -a),
(1, 0, -a),
(0, a, 1),
(0, a, -1),
(0, -a, 1),
(0, -a, -1),
(a, 1, 0),
(-a, 1, 0),
(a, -1, 0),
(-a, -1, 0),
]) / np.sqrt(a + 2)
faces = np.array([
(0, 4, 1),
(0, 9, 4),
(9, 5, 4),
(4, 5, 8),
(4, 8, 1),
(8, 10, 1),
(8, 3, 10),
(5, 3, 8),
(5, 2, 3),
(2, 7, 3),
(7, 10, 3),
(7, 6, 10),
(7, 11, 6),
(11, 0, 6),
(0, 1, 6),
(6, 1, 10),
(9, 0, 11),
(9, 11, 2),
(9, 2, 5),
(7, 2, 11),
])
elif base_shape == 'octahedron':
verts = np.array(
[(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
)
corners = np.array(list(itertools.product([-1, 1], repeat=3)))
pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
else:
raise ValueError(f'base_shape {base_shape} not supported')
verts = tesselate_geodesic(verts, faces, angular_tesselation)
if remove_symmetries:
# Remove elements of `verts` that are reflections of each other.
match = compute_sq_dist(verts.T, -verts.T) < eps
verts = verts[~np.any(np.triu(match), axis=0), :]
basis = verts[:, ::-1]
return basis
<fim_middle># Barycentric weights. | # Barycentric weights. | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/coord.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/spin_math.py
def safe_sqrt(x,
*,
eps = jnp.finfo(jnp.float32).eps,
value_at_zero = 0.0):
"""A safe version of jnp.sqrt that avoid evaluating at zero.
Note: sqrt(x) = sqrt(eps) = 3e-4 when x < eps = 1.19e-7.
Args:
x: The operand.
eps: A small number to prevent NaNs.
value_at_zero: The value to clamp x to near zero. The return value will be
sqrt(value_at_zero)
Returns:
The sqrt(x), or sqrt(value_at_zero) near zero.
"""
safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
return jnp.sqrt(safe_x)
# camp_zipnerf/internal/stepfun.py
def weight_to_pdf(t, w):
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
utils.assert_valid_stepfun(t, w)
td = jnp.diff(t)
return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))
# camp_zipnerf/internal/geometry.py
def line_distance(point1, dir1, point2,
dir2):
"""Compute the distance between two lines in 3D.
Note that this is the distance between lines and not line segments or rays;
i.e., it does not consider endpoints and will compute the distance assuming
the line extends infinitely in both directions.
Args:
point1: (3,) a point on the first line.
dir1: (3,) the direction vector of the first line.
point2: (3,) a point on the second line.
dir2: (3,) the direction vector of the second line.
Returns:
The distance between the two lines.
"""
is_parallel = are_lines_parallel(dir1, dir2)
skew_dist = skew_line_distance(point1, dir1, point2, dir2)
parallel_dist = line_to_point_distance(point1, dir1, point2)
return jnp.where(is_parallel, parallel_dist, skew_dist)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating coordinate spaces and distances along rays."""
from internal import geopoly
from internal import math
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
def contract(x):
"""Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077)."""
# Clamping to 1 produces correct scale inside |x| < 1
x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True))
scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq
z = scale * x
return z
def inv_contract(z):
"""The inverse of contract()."""
# Clamping to 1 produces correct scale inside |z| < 1
z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True))
inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq
x = z / inv_scale
return x
def track_linearize(fn, mean, cov):
"""Apply function `fn` to a set of means and covariances, ala a Kalman filter.
We can analytically transform a Gaussian parameterized by `mean` and `cov`
with a function `fn` by linearizing `fn` around `mean`, and taking advantage
of the fact that Covar[Ax + y] = A(Covar[x])A^T (see
https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
cov: a tensor of covariances, where the last two axes are the dimensions.
Returns:
fn_mean: the transformed means.
fn_cov: the transformed covariances.
"""
if (len(mean.shape) + 1) != len(cov.shape):
raise ValueError('cov must be non-diagonal')
fn_mean, lin_fn = jax.linearize(fn, mean)
fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov))
return fn_mean, fn_cov
def track_isotropic(fn, mean, scale):
"""Apply function `fn` to a set of means and scales, ala a Kalman filter.
This is the isotropic or scalar equivalent of track_linearize, as we're still
linearizing a function and tracking a Gaussian through it, but the input and
output Gaussians are all isotropic and are only represented with a single
`scale` value (where `scale**2` is the variance of the Gaussian).
Args:
fn: A function that can be applied to `mean`.
mean: a tensor of Gaussian means, where the last axis is the dimension.
scale: a tensor of scales, with the same shape as means[..., -1].
Returns:
fn_mean: the transformed means.
fn_scale: the transformed scales.
"""
if mean.shape[:-1] != scale.shape:
raise ValueError(
f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.'
)
d = mean.shape[-1]
fn_mean, lin_fn = jax.linearize(fn, mean)
if scale is not None:
# Compute the Jacobian of fn function at the locations of each mean.
jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)(
jnp.broadcast_to(jnp.eye(d), mean.shape + (d,))
)
# The cube root of the determinant of the Jacobian is the geometric mean
# of the eigenvalues of the Jacobian, which gives us the isotropic scaling
# implied by `fn` at each mean that `scale` should be multiplied by.
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0.
abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac)))
# Special case d == 3 for speed's sake.
fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d))
else:
fn_scale = None
return fn_mean, fn_scale
def contract3_isoscale(x):
"""A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
if x.shape[-1] != 3:
raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
# Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq:
return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq))
def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None):
"""Construct a bijection between metric distances and normalized distances.
See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a
detailed explanation.
Args:
fn: the function to ray distances.
t_near: a tensor of near-plane distances.
t_far: a tensor of far-plane distances.
fn_inv: Optional, if not None then it's used as the inverse of fn().
Returns:
t_to_s: a function that maps distances to normalized distances in [0, 1].
s_to_t: the inverse of t_to_s.
"""
if fn is None:
fn_fwd = lambda x: x
fn_inv = lambda x: x
else:
fn_fwd = fn
if fn_inv is None:
# A simple mapping from some functions to their inverse.
inv_mapping = {
'reciprocal': jnp.reciprocal,
'log': jnp.exp,
'exp': jnp.log,
'sqrt': jnp.square,
'square': jnp.sqrt,
}
fn_inv = inv_mapping[fn.__name__]
fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)]
# Forcibly clip t to the range of valid values, to guard against inf's.
t_clip = lambda t: jnp.clip(t, t_near, t_far)
t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near)
s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near))
return t_to_s, s_to_t
def expected_sin(mean, var):
"""Compute the mean of sin(x), x ~ N(mean, var)."""
return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value.
def integrated_pos_enc(mean, var, min_deg, max_deg):
"""Encode `x` with sinusoids scaled by 2^[min_deg, max_deg).
Args:
mean: tensor, the mean coordinates to be encoded
var: tensor, the variance of the coordinates to be encoded.
min_deg: int, the min degree of the encoding.
max_deg: int, the max degree of the encoding.
Returns:
encoded: jnp.ndarray, encoded variables.
"""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = mean.shape[:-1] + (-1,)
scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape)
scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape)
return expected_sin(
jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1),
jnp.concatenate([scaled_var] * 2, axis=-1),
)
def lift_and_diagonalize(mean, cov, basis):
"""Project `mean` and `cov` onto basis and diagonalize the projected cov."""
fn_mean = math.matmul(mean, basis)
fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2)
return fn_mean, fn_cov_diag
def pos_enc(x, min_deg, max_deg, append_identity=True):
"""The positional encoding used by the original NeRF paper."""
scales = 2.0 ** jnp.arange(min_deg, max_deg)
shape = x.shape[:-1] + (-1,)
scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c).
scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c).
# Note that we're not using safe_sin, unlike IPE.
# (..., s*c + s*c).
four_feat = jnp.sin(
jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1)
)
if append_identity:
return jnp.concatenate([x, four_feat], axis=-1)
else:
return four_feat
def sqrtm(mat, return_eigs=False):
"""Take the matrix square root of a PSD matrix [..., d, d]."""
eigvec, eigval = jax.lax.linalg.eigh(
mat, symmetrize_input=False, sort_eigenvalues=False
)
scaling = math.safe_sqrt(eigval)[Ellipsis, None, :]
sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1))
return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat
def isotropize(cov, mode='accurate'):
"""Turn covariances into isotropic covariances with the same determinant."""
d = cov.shape[-1]
if d == 1:
return cov
if mode == 'fast':
det = jnp.linalg.det(cov)
diag_val = det ** (1 / d)
is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det)
elif mode == 'accurate':
log_det = jnp.linalg.slogdet(cov)[1]
diag_val = jnp.exp(log_det / d)
is_invalid = ~jnp.isfinite(log_det)
else:
raise ValueError(f'mode={mode} not implemented.')
cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None]
<fim_suffix>
# guard against NaN gradients!
cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso)
return cov_iso
def construct_perp_basis(directions):
"""Construct a perpendicular basis for each 3-vector in `directions`."""
if directions.shape[-1] != 3:
raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D')
# To generate a vector perpendicular to `directions`, we take a cross-product
# with an arbitrary vector [0, 0, 1].
cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0]))
# In the rare case that `directions` is very close to [0, 0, 1], we compute an
# alternate cross-product with [1, 1, 1] to use instead.
cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0]))
use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1)
cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a)
# Crossing `directions` with `cross1` gives us our 3rd vector.
cross2 = jnp.cross(directions, cross1)
# Normalize vectors before returning them.
normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True))
return normalize(cross1), normalize(cross2)
def hexify(rng, *, origins, directions, radii, tdist):
"""Produce hexagon-shaped samples from ray segments."""
# Construct a base set of angles, by linspacing [0, 2pi] in a specific order.
# This is one of two orderings of angles that doesn't induce any anisotropy
# into the sample covariance of the multisample coordinates. Any rotation and
# mirroring along the z-axis of this ordering is also valid.
# There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1].
# This seems to work less well though likely because of the strong correlation
# between adjacent angles.
thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1])
# Lift the angles to the size of the rays.
sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas))
thetas = jnp.broadcast_to(thetas, sz)
if rng is not None:
# Randomly reverse the order of half of the hexes.
key, rng = random.split(rng)
flip = random.bernoulli(key, shape=sz[:-1])
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
# Rotate each hex by some random amount.
key, rng = random.split(rng)
thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None]
else:
# If we're deterministic, flip and shift every other hex by 30 degrees.
flip = jnp.arange(thetas.shape[-2]) % 2
thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas)
thetas += (flip * jnp.pi / 6)[Ellipsis, None]
# TODO(barron): Plumb through the dx/dy frame for the original ray in the
# image plane, to avoid the need of this.
perp_axis1, perp_axis2 = construct_perp_basis(directions)
# Grab each t-interval's midpoint and half-width.
t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:]
s = (t0 + t1) / 2
d = (t1 - t0) / 2
# Compute the length along the ray for each multisample, using mip-NeRF math.
cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * (
(t1**2 + 2 * s**2)[Ellipsis, None]
+ (3 / np.sqrt(7))
* (np.arange(6) * (2 / 5) - 1)
* math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None]
)
# Compute the offset from the ray for each multisample.
perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz
# Go from ray coordinate to world coordinates.
cx = perp_mag * jnp.cos(thetas)
cy = perp_mag * jnp.sin(thetas)
control = (
origins[Ellipsis, None, None, :]
+ perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None]
+ perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None]
+ directions[Ellipsis, None, None, :] * cz[Ellipsis, None]
)
return control, perp_mag
def unscented_transform(mean, cov, basis, axis=0):
"""Construct "sigma points" along `axis` from each mean and covariance."""
d = cov.shape[-1]
mean_ex = jnp.expand_dims(mean, axis)
if basis == 'mean':
# This effectively disables the unscented transform.
return mean_ex
if basis.startswith('random_'):
num_random = int(basis.split('_')[-1])
# TODO(barron): use a non-fixed random seed?
noise = random.multivariate_normal(
random.PRNGKey(0),
jnp.zeros_like(mean),
cov,
(num_random,) + mean.shape[:-1],
)
control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis)
return control
sqrtm_cov = sqrtm(cov)
if any([
basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron']
]):
# Use tessellated regular polyhedra vertices (and vec(0)) as control points.
if d != 3:
raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.')
base_shape, angular_tesselation = basis.split('_')
transform = geopoly.generate_basis(
base_shape, int(angular_tesselation), remove_symmetries=False
).T
transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1)
transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None]
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
elif basis == 'julier':
# The most basic symmetric unscented transformation from the original paper,
# which yields 2*d+1 control points.
offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis)
control = jnp.concatenate(
[mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis
)
elif basis == 'menegaz':
# A compact unscented transformation from
# folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf
# which yields d+1 control points.
if d == 3:
# A hand-optimized version of the d==3 case.
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True)
offsets = jnp.concatenate(
[-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1
)
control = mean_ex + jnp.moveaxis(offsets, -1, axis)
else:
transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d
# == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1))
transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1)
control = mean_ex + jnp.moveaxis(
math.matmul(sqrtm_cov, transform1), -1, axis
)
else:
raise ValueError(f'basis={basis} not implemented.')
return control
def compute_control_points(
means,
covs,
rays,
tdist,
rng,
unscented_mip_basis,
unscented_scale_mult,
):
"""Wrapper to compute unscented control points for the MLP class."""
if unscented_mip_basis == 'hexify':
control, perp_mag = hexify(
rng,
origins=rays.origins,
directions=rays.directions,
radii=rays.radii,
tdist=tdist,
)
else:
# Use a normal unscented transformation.
control = unscented_transform(
means,
covs,
basis=unscented_mip_basis,
axis=-2,
)
if unscented_scale_mult > 0:
if rays is None:
raise SyntaxError(
'Rays are required as input if unscented_scale_mult > 0.'
)
# Mimic the math used by hexify to produce comparable scales.
t_recon = jnp.sum(
(control - rays.origins[Ellipsis, None, None, :])
* rays.directions[Ellipsis, None, None, :],
axis=-1,
)
perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon
else:
perp_mag = None
return control, perp_mag
<fim_middle># Guard against NaN outputs when `det` is super small. Note that this does not | # Guard against NaN outputs when `det` is super small. Note that this does not | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/geopoly.py
def compute_sq_dist(mat0, mat1=None):
"""Compute the squared Euclidean distance between all pairs of columns."""
if mat1 is None:
mat1 = mat0
# Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y.
sq_norm0 = np.sum(mat0**2, 0)
sq_norm1 = np.sum(mat1**2, 0)
sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors.
return sq_dist
# camp_zipnerf/internal/ref_utils.py
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
# camp_zipnerf/internal/linspline.py
def insert_knot(ti, t, y):
"""Inserts knots ti into the linear spline (t, w). Assumes zero endpoints."""
utils.assert_valid_linspline(t, y)
check_zero_endpoints(y)
# Compute the spline value at the insertion points.
yi = query(ti, t, y)
# Concatenate the insertion points and values onto the end of each spline.
ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape)
yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape)
to = jnp.concatenate([t, ti_ex], axis=-1)
yo = jnp.concatenate([y, yi_ex], axis=-1)
# Sort the spline according to t.
sort_idx = jnp.argsort(to)
to = jnp.take_along_axis(to, sort_idx, axis=-1)
yo = jnp.take_along_axis(yo, sort_idx, axis=-1)
return to, yo
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for manipulating step functions (piecewise-constant 1D functions).
We have a shared naming and dimension convention for these functions.
All input/output step functions are assumed to be aligned along the last axis.
`t` always indicates the x coordinates of the *endpoints* of a step function.
`y` indicates unconstrained values for the *bins* of a step function
`w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin
values that *integrate* to <= 1.
"""
from internal import linspline
from internal import math
from internal import utils
import jax
import jax.numpy as jnp
import numpy as np
def query(tq, t, y, left=None, right=None):
"""Query step function (t, y) at locations tq. Edges repeat by default."""
utils.assert_valid_stepfun(t, y)
# Query the step function to recover the interval value.
(i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu())
# Apply boundary conditions.
left = y[Ellipsis, :1] if left is None else left
right = y[Ellipsis, -1:] if right is None else right
yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq)
return yq
def weight_to_pdf(t, w):
"""Turn a vector of weights that sums to 1 into a PDF that integrates to 1."""
utils.assert_valid_stepfun(t, w)
td = jnp.diff(t)
return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td))
def pdf_to_weight(t, p):
"""Turn a PDF that integrates to 1 into a vector of weights that sums to 1."""
utils.assert_valid_stepfun(t, p)
return p * jnp.diff(t)
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
def invert_cdf(u, t, w_logits):
"""Invert the CDF defined by (t, w) at the points specified by u in [0, 1)."""
utils.assert_valid_stepfun(t, w_logits)
# Compute the PDF and CDF for each weight vector.
w = jax.nn.softmax(w_logits, axis=-1)
cw = integrate_weights(w)
# Interpolate into the inverse CDF.
t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu())
return t_new
def sample(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
deterministic_center=False,
eps=jnp.finfo(jnp.float32).eps,
):
"""Piecewise-Constant PDF sampling from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of samples.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
deterministic_center: bool, if False, when `rng` is None return samples that
linspace the entire PDF. If True, skip the front and back of the linspace
so that the centers of each PDF interval are returned.
eps: float, something like numerical epsilon.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
# Draw uniform samples.
if rng is None:
# Match the behavior of jax.random.uniform() by spanning [0, 1-eps].
if deterministic_center:
pad = 1 / (2 * num_samples)
u = jnp.linspace(pad, 1.0 - pad - eps, num_samples)
else:
u = jnp.linspace(0, 1.0 - eps, num_samples)
u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,))
else:
# `u` is in [0, 1) --- it can be zero, but it can never be 1.
u_max = eps + (1 - eps) / num_samples
max_jitter = (1 - u_max) / (num_samples - 1) - eps
d = 1 if single_jitter else num_samples
u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform(
rng, t.shape[:-1] + (d,), maxval=max_jitter
)
return invert_cdf(u, t, w_logits)
def sample_intervals(
rng,
t,
w_logits,
num_samples,
single_jitter=False,
domain=(-jnp.inf, jnp.inf),
):
"""Sample *intervals* (rather than points) from a step function.
Args:
rng: random number generator (or None for `linspace` sampling).
t: [..., num_bins + 1], bin endpoint coordinates (must be sorted)
w_logits: [..., num_bins], logits corresponding to bin weights
num_samples: int, the number of intervals to sample.
single_jitter: bool, if True, jitter every sample along each ray by the same
amount in the inverse CDF. Otherwise, jitter each sample independently.
domain: (minval, maxval), the range of valid values for `t`.
Returns:
t_samples: jnp.ndarray(float32), [batch_size, num_samples].
"""
utils.assert_valid_stepfun(t, w_logits)
if num_samples <= 1:
raise ValueError(f'num_samples must be > 1, is {num_samples}.')
# Sample a set of points from the step function.
centers = sample(
rng, t, w_logits, num_samples, single_jitter, deterministic_center=True
)
# The intervals we return will span the midpoints of each adjacent sample.
mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2
# Each first/last fencepost is the reflection of the first/last midpoint
# around the first/last sampled center.
first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1]
last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:]
samples = jnp.concatenate([first, mid, last], axis=-1)
# We clamp to the limits of the input domain, provided by the caller.
samples = jnp.clip(samples, *domain)
return samples
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
<fim_suffix>
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
# The loss incurred within each individual interval with itself.
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
def weighted_percentile(t, w, ps):
"""Compute the weighted percentiles of a step function. w's must sum to 1."""
utils.assert_valid_stepfun(t, w)
cw = integrate_weights(w)
# We want to interpolate into the integrated weights according to `ps`.
wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
jnp.array(ps) / 100, cw, t
)
return wprctile
def resample(t, tp, vp, use_avg=False):
"""Resample a step function defined by (tp, vp) into intervals t.
Notation roughly matches jnp.interp. Resamples by summation by default.
Args:
t: tensor with shape (..., n+1), the endpoints to resample into.
tp: tensor with shape (..., m+1), the endpoints of the step function being
resampled.
vp: tensor with shape (..., m), the values of the step function being
resampled.
use_avg: bool, if False, return the sum of the step function for each
interval in `t`. If True, return the average, weighted by the width of
each interval in `t`.
Returns:
v: tensor with shape (..., n), the values of the resampled step function.
"""
utils.assert_valid_stepfun(tp, vp)
if use_avg:
wp = jnp.diff(tp)
v_numer = resample(t, tp, vp * wp, use_avg=False)
v_denom = resample(t, tp, wp, use_avg=False)
v = math.safe_div(v_numer, v_denom)
return v
acc = jnp.cumsum(vp, axis=-1)
acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1)
acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')(
t, tp, acc0
)
v = jnp.diff(acc0_resampled, axis=-1)
return v
def blur_and_resample_weights(tq, t, w, blur_halfwidth):
"""Blur the (t, w) histogram by blur_halfwidth, then resample it into tq."""
utils.assert_valid_stepfun(t, w)
# Convert the histogram to a PDF.
p = weight_to_pdf(t, w)
# Blur the PDF step function into a piecewise linear spline PDF.
t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth)
# Integrate the spline PDF, then query it to get integrated weights.
quad = linspline.compute_integral(t_linspline, p_linspline)
acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad)
# Undo the integration to get weights.
wq = jnp.diff(acc_wq, axis=-1)
# Fix negative values to 0, as they should never happen but may due to
# numerical issues.
wq = jnp.maximum(0, wq)
return wq
<fim_middle># The loss incurred between all pairs of intervals. | # The loss incurred between all pairs of intervals. | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/linspline.py
def sorted_lookup(x, xp):
"""Lookup `x` at sorted locations `xp`."""
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
functools.partial(jnp.searchsorted, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx0 = jnp.maximum(idx - 1, 0)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
return idx0, idx1
# camp_zipnerf/internal/vis.py
def visualize_cmap(
value,
weight,
colormap,
lo=None,
hi=None,
percentile=99.0,
curve_fn=lambda x: x,
modulus=None,
matte_background=True,
):
"""Visualize a 1D image and a 1D weighting according to some colormap.
Args:
value: A 1D image.
weight: A weight map, in [0, 1].
colormap: A colormap function.
lo: The lower bound to use when rendering, if None then use a percentile.
hi: The upper bound to use when rendering, if None then use a percentile.
percentile: What percentile of the value map to crop to when automatically
generating `lo` and `hi`. Depends on `weight` as well as `value'.
curve_fn: A curve function that gets applied to `value`, `lo`, and `hi`
before the rest of visualization. Good choices: x, 1/(x+eps), log(x+eps).
modulus: If not None, mod the normalized value by `modulus`. Use (0, 1]. If
`modulus` is not None, `lo`, `hi` and `percentile` will have no effect.
matte_background: If True, matte the image over a checkerboard.
Returns:
A colormap rendering.
"""
# Identify the values that bound the middle of `value' according to `weight`.
lo_auto, hi_auto = weighted_percentile(
value, weight, [50 - percentile / 2, 50 + percentile / 2]
)
# If `lo` or `hi` are None, use the automatically-computed bounds above.
eps = jnp.finfo(jnp.float32).eps
lo = lo or (lo_auto - eps)
hi = hi or (hi_auto + eps)
# Curve all values.
value, lo, hi = [curve_fn(x) for x in [value, lo, hi]]
# Wrap the values around if requested.
if modulus:
value = jnp.mod(value, modulus) / modulus
else:
# Otherwise, just scale to [0, 1].
value = jnp.clip((value - jnp.minimum(lo, hi)) / jnp.abs(hi - lo), 0, 1)
value = jnp.nan_to_num(value)
if colormap:
colorized = colormap(value)[Ellipsis, :3]
else:
if value.shape[-1] != 3:
raise ValueError(f'value must have 3 channels but has {value.shape[-1]}')
colorized = value
return matte(colorized, weight) if matte_background else colorized
# camp_zipnerf/internal/camera_delta.py
def _create_points_from_contracted_frustum(
self,
camera: jaxcam.Camera,
rng: chex.PRNGKey,
sample_depth_contracted: bool = True,
) -> jnp.ndarray:
"""Samples points uniformly in the contracted frustum.
We first compute the contracted camera frustum by intersecting camera rays
with the bounding sphere (which has radius 2). This defines a frustum from
the near plane to infinity. We can then apply the inverse of the contraction
to the points to get the metric point samples.
Args:
camera: The camera used to compute the frustum.
rng: A PRNGKey used to sample points.
sample_depth_contracted: If True, sample the depth in the contracted
space. Otherwise, sample linearly in metric space.
Returns:
Points sampled uniformly in the contracted frustum.
"""
if self.precondition_far >= 2.0:
raise ValueError('Far plane must be <2 when using contracted planes.')
rng, key1, key2 = random.split(rng, 3)
pixels = (
random.uniform(key1, (self.precondition_num_points, 2))
* jnp.array([camera.image_size_x - 1, camera.image_size_y - 1])
+ 0.5
)
rays = jaxcam.pixels_to_rays(camera, pixels)
near_points = camera.position + rays * self.precondition_near
far_points = geometry.ray_sphere_intersection(
camera.position, rays, radius=self.precondition_far
)
s_dist = random.uniform(key2, (self.precondition_num_points, 1))
if sample_depth_contracted:
# Lerp between contracted near and far plane.
points = s_dist * far_points + (1 - s_dist) * near_points
points = coord.inv_contract(points)
else:
# Lerp between uncontracted near and far plane.
near_points = coord.inv_contract(near_points)
far_points = coord.inv_contract(far_points)
points = s_dist * far_points + (1 - s_dist) * near_points
return points
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
<fim_suffix>
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lortentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special casess.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle># jnp.searchsorted() has slightly different conventions for boundary | # jnp.searchsorted() has slightly different conventions for boundary | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/image_io.py
def load_exif(pth):
"""Load EXIF data for an image."""
with utils.open_file(pth, 'rb') as f:
image_pil = Image.open(f)
exif_pil = image_pil._getexif() # pylint: disable=protected-access
if exif_pil is not None:
exif = {
ExifTags.TAGS[k]: v for k, v in exif_pil.items() if k in ExifTags.TAGS
}
else:
exif = {}
return exif
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
<fim_suffix>
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle># iterations | # iterations | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/math.py
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
# camp_zipnerf/internal/ref_utils.py
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
# camp_zipnerf/internal/coord.py
def contract3_isoscale(x):
"""A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
if x.shape[-1] != 3:
raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
# Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq:
return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq))
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for constructing geodesic polyhedron, which are used as a basis."""
import itertools
import numpy as np
def compute_sq_dist(mat0, mat1=None):
"""Compute the squared Euclidean distance between all pairs of columns."""
if mat1 is None:
mat1 = mat0
# Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y.
sq_norm0 = np.sum(mat0**2, 0)
sq_norm1 = np.sum(mat1**2, 0)
sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors.
return sq_dist
def compute_tesselation_weights(v):
"""Tesselate the vertices of a triangle by a factor of `v`."""
if v < 1:
raise ValueError(f'v {v} must be >= 1')
int_weights = []
<fim_suffix>
int_weights = np.array(int_weights)
weights = int_weights / v # Barycentric weights.
return weights
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
"""Tesselate the vertices of a geodesic polyhedron.
Args:
base_verts: tensor of floats, the vertex coordinates of the geodesic.
base_faces: tensor of ints, the indices of the vertices of base_verts that
constitute eachface of the polyhedra.
v: int, the factor of the tesselation (v==1 is a no-op).
eps: float, a small value used to determine if two vertices are the same.
Returns:
verts: a tensor of floats, the coordinates of the tesselated vertices.
"""
if not isinstance(v, int):
raise ValueError(f'v {v} must an integer')
tri_weights = compute_tesselation_weights(v)
verts = []
for base_face in base_faces:
new_verts = np.matmul(tri_weights, base_verts[base_face, :])
new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
verts.append(new_verts)
verts = np.concatenate(verts, 0)
sq_dist = compute_sq_dist(verts.T)
assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
unique = np.unique(assignment)
verts = verts[unique, :]
return verts
def generate_basis(
base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
"""Generates a 3D basis by tesselating a geometric polyhedron.
Args:
base_shape: string, the name of the starting polyhedron, must be either
'tetrahedron', 'icosahedron' or 'octahedron'.
angular_tesselation: int, the number of times to tesselate the polyhedron,
must be >= 1 (a value of 1 is a no-op to the polyhedron).
remove_symmetries: bool, if True then remove the symmetric basis columns,
which is usually a good idea because otherwise projections onto the basis
will have redundant negative copies of each other.
eps: float, a small number used to determine symmetries.
Returns:
basis: a matrix with shape [3, n].
"""
if base_shape == 'tetrahedron':
verts = np.array([
(np.sqrt(8 / 9), 0, -1 / 3),
(-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
(-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
(0, 0, 1),
])
faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
elif base_shape == 'icosahedron':
a = (np.sqrt(5) + 1) / 2
verts = np.array([
(-1, 0, a),
(1, 0, a),
(-1, 0, -a),
(1, 0, -a),
(0, a, 1),
(0, a, -1),
(0, -a, 1),
(0, -a, -1),
(a, 1, 0),
(-a, 1, 0),
(a, -1, 0),
(-a, -1, 0),
]) / np.sqrt(a + 2)
faces = np.array([
(0, 4, 1),
(0, 9, 4),
(9, 5, 4),
(4, 5, 8),
(4, 8, 1),
(8, 10, 1),
(8, 3, 10),
(5, 3, 8),
(5, 2, 3),
(2, 7, 3),
(7, 10, 3),
(7, 6, 10),
(7, 11, 6),
(11, 0, 6),
(0, 1, 6),
(6, 1, 10),
(9, 0, 11),
(9, 11, 2),
(9, 2, 5),
(7, 2, 11),
])
elif base_shape == 'octahedron':
verts = np.array(
[(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
)
corners = np.array(list(itertools.product([-1, 1], repeat=3)))
pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
else:
raise ValueError(f'base_shape {base_shape} not supported')
verts = tesselate_geodesic(verts, faces, angular_tesselation)
if remove_symmetries:
# Remove elements of `verts` that are reflections of each other.
match = compute_sq_dist(verts.T, -verts.T) < eps
verts = verts[~np.any(np.triu(match), axis=0), :]
basis = verts[:, ::-1]
return basis
<fim_middle>for i in range(v + 1):
for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j))) | for i in range(v + 1):
for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j))) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/geopoly.py
def compute_tesselation_weights(v):
"""Tesselate the vertices of a triangle by a factor of `v`."""
if v < 1:
raise ValueError(f'v {v} must be >= 1')
int_weights = []
for i in range(v + 1):
for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j)))
int_weights = np.array(int_weights)
weights = int_weights / v # Barycentric weights.
return weights
# camp_zipnerf/internal/stepfun.py
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
# The loss incurred between all pairs of intervals.
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
# The loss incurred within each individual interval with itself.
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
# camp_zipnerf/internal/image_utils.py
def render_histogram(x, **kwargs):
"""Call pyplot's hist() and render it to a numpy buffer."""
fig = plt.figure()
fig.gca().hist(x, **kwargs)
fig.canvas.draw()
hw = fig.canvas.get_width_height()[::-1]
buf = fig.canvas.tostring_rgb()
array = np.frombuffer(buf, dtype=np.uint8).reshape(hw + (3,))
plt.close(fig)
return array
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
import math
from internal import math as math_lib
import jax.numpy as jnp
import numpy as np
def reflect(viewdirs, normals):
"""Reflect view directions about normals.
The reflection of a vector v about a unit vector n is a vector u such that
dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
equations is u = 2 dot(n, v) n - v.
Args:
viewdirs: [..., 3] array of view directions.
normals: [..., 3] array of normal directions (assumed to be unit vectors).
Returns:
[..., 3] array of reflection directions.
"""
return (
2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
- viewdirs
)
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
"""Normalize x to unit length along last axis.
Normalizing vectors is surprisingly tricky, because you have to address the
case where the denominator in the normalization is tiny or zero, in which case
gradients will explode. For this reason, we perform two normalizations: in the
forward pass, we clamp the denominator with ~1e-40, but in the backward pass
we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the
output of this function is unit norm (unless x is very very small) while
preventing exploding gradients.
Args:
x: The array of values to normalize.
grad_eps: The value to clip the squared norm by before division in the
backward pass.
Returns:
A normalized array x / ||x||, normalized along the last axis.
"""
tiny = jnp.finfo(jnp.float32).tiny
grad_eps = jnp.maximum(tiny, grad_eps)
denom_sq = jnp.sum(x**2, axis=-1, keepdims=True)
normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq))
normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq))
# Use `normal_val` in the forward pass but `normal_grad` in the backward pass.
normal = math_lib.override_gradient(normal_val, normal_grad)
return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal)
def compute_weighted_mae(weights, normals, normals_gt):
"""Compute weighted mean angular error, assuming normals are unit length."""
angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1))
return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum())
def generalized_binomial_coeff(a, k):
"""Compute generalized binomial coefficients."""
return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
"""Compute associated Legendre polynomial coefficients.
Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
(l, m)th associated Legendre polynomial, P_l^m(cos(theta)).
Args:
l: associated Legendre polynomial degree.
m: associated Legendre polynomial order.
k: power of cos(theta).
Returns:
A float, the coefficient of the term corresponding to the inputs.
"""
return (
(-1) ** m
* 2**l
* math.factorial(l)
/ math.factorial(k)
/ math.factorial(l - k - m)
* generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l)
)
def sph_harm_coeff(l, m, k):
"""Compute spherical harmonic coefficients."""
return np.sqrt(
(2.0 * l + 1.0)
* math.factorial(l - m)
/ (4.0 * np.pi * math.factorial(l + m))
) * assoc_legendre_coeff(l, m, k)
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
<fim_suffix>
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
def generate_ide_fn(deg_view):
"""Generate integrated directional encoding (IDE) function.
This function returns a function that computes the integrated directional
encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating integrated directional encoding.
Raises:
ValueError: if deg_view is larger than 5.
"""
if deg_view > 5:
raise ValueError('Only deg_view of at most 5 is numerically stable.')
ml_array = get_ml_array(deg_view)
l_max = 2 ** (deg_view - 1)
# Create a matrix corresponding to ml_array holding all coefficients, which,
# when multiplied (from the right) by the z coordinate Vandermonde matrix,
# results in the z component of the encoding.
mat = np.zeros((l_max + 1, ml_array.shape[1]))
for i, (m, l) in enumerate(ml_array.T):
for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k)
def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[Ellipsis, 0:1]
y = xyz[Ellipsis, 1:2]
z = xyz[Ellipsis, 2:3]
# Compute z Vandermonde matrix.
vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1)
# Compute x+iy Vandermonde matrix.
vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)
# Get spherical harmonics.
sph_harms = vmxy * math_lib.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
# concentration parameter, kappa.
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
ide = sph_harms * jnp.exp(-sigma * kappa_inv)
# Split into real and imaginary parts and return
return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1)
return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
"""Generate directional encoding (DE) function.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating directional encoding.
"""
integrated_dir_enc_fn = generate_ide_fn(deg_view)
def dir_enc_fn(xyz):
"""Function returning directional encoding (DE)."""
return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1]))
return dir_enc_fn
def orientation_loss(w, n, v):
"""Orientation loss on weights `w`, normals `n`, and -view directions `v`."""
n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1)
return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1))
<fim_middle>for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l)) | for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l)) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/math.py
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
# camp_zipnerf/internal/ref_utils.py
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
# camp_zipnerf/internal/coord.py
def contract3_isoscale(x):
"""A fast version of track_isotropic(contract, *)'s scaling for 3D inputs."""
if x.shape[-1] != 3:
raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.')
norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1))
# Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq:
return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq))
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for constructing geodesic polyhedron, which are used as a basis."""
import itertools
import numpy as np
def compute_sq_dist(mat0, mat1=None):
"""Compute the squared Euclidean distance between all pairs of columns."""
if mat1 is None:
mat1 = mat0
# Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y.
sq_norm0 = np.sum(mat0**2, 0)
sq_norm1 = np.sum(mat1**2, 0)
sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors.
return sq_dist
def compute_tesselation_weights(v):
"""Tesselate the vertices of a triangle by a factor of `v`."""
if v < 1:
raise ValueError(f'v {v} must be >= 1')
int_weights = []
for i in range(v + 1):
<fim_suffix>
int_weights = np.array(int_weights)
weights = int_weights / v # Barycentric weights.
return weights
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
"""Tesselate the vertices of a geodesic polyhedron.
Args:
base_verts: tensor of floats, the vertex coordinates of the geodesic.
base_faces: tensor of ints, the indices of the vertices of base_verts that
constitute eachface of the polyhedra.
v: int, the factor of the tesselation (v==1 is a no-op).
eps: float, a small value used to determine if two vertices are the same.
Returns:
verts: a tensor of floats, the coordinates of the tesselated vertices.
"""
if not isinstance(v, int):
raise ValueError(f'v {v} must an integer')
tri_weights = compute_tesselation_weights(v)
verts = []
for base_face in base_faces:
new_verts = np.matmul(tri_weights, base_verts[base_face, :])
new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
verts.append(new_verts)
verts = np.concatenate(verts, 0)
sq_dist = compute_sq_dist(verts.T)
assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
unique = np.unique(assignment)
verts = verts[unique, :]
return verts
def generate_basis(
base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
"""Generates a 3D basis by tesselating a geometric polyhedron.
Args:
base_shape: string, the name of the starting polyhedron, must be either
'tetrahedron', 'icosahedron' or 'octahedron'.
angular_tesselation: int, the number of times to tesselate the polyhedron,
must be >= 1 (a value of 1 is a no-op to the polyhedron).
remove_symmetries: bool, if True then remove the symmetric basis columns,
which is usually a good idea because otherwise projections onto the basis
will have redundant negative copies of each other.
eps: float, a small number used to determine symmetries.
Returns:
basis: a matrix with shape [3, n].
"""
if base_shape == 'tetrahedron':
verts = np.array([
(np.sqrt(8 / 9), 0, -1 / 3),
(-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
(-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
(0, 0, 1),
])
faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
elif base_shape == 'icosahedron':
a = (np.sqrt(5) + 1) / 2
verts = np.array([
(-1, 0, a),
(1, 0, a),
(-1, 0, -a),
(1, 0, -a),
(0, a, 1),
(0, a, -1),
(0, -a, 1),
(0, -a, -1),
(a, 1, 0),
(-a, 1, 0),
(a, -1, 0),
(-a, -1, 0),
]) / np.sqrt(a + 2)
faces = np.array([
(0, 4, 1),
(0, 9, 4),
(9, 5, 4),
(4, 5, 8),
(4, 8, 1),
(8, 10, 1),
(8, 3, 10),
(5, 3, 8),
(5, 2, 3),
(2, 7, 3),
(7, 10, 3),
(7, 6, 10),
(7, 11, 6),
(11, 0, 6),
(0, 1, 6),
(6, 1, 10),
(9, 0, 11),
(9, 11, 2),
(9, 2, 5),
(7, 2, 11),
])
elif base_shape == 'octahedron':
verts = np.array(
[(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
)
corners = np.array(list(itertools.product([-1, 1], repeat=3)))
pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
else:
raise ValueError(f'base_shape {base_shape} not supported')
verts = tesselate_geodesic(verts, faces, angular_tesselation)
if remove_symmetries:
# Remove elements of `verts` that are reflections of each other.
match = compute_sq_dist(verts.T, -verts.T) < eps
verts = verts[~np.any(np.triu(match), axis=0), :]
basis = verts[:, ::-1]
return basis
<fim_middle>for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j))) | for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j))) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/image_io.py
def load_exif(pth):
"""Load EXIF data for an image."""
with utils.open_file(pth, 'rb') as f:
image_pil = Image.open(f)
exif_pil = image_pil._getexif() # pylint: disable=protected-access
if exif_pil is not None:
exif = {
ExifTags.TAGS[k]: v for k, v in exif_pil.items() if k in ExifTags.TAGS
}
else:
exif = {}
return exif
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
<fim_suffix>
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>for item in fn(*args, **kwargs):
results_queue.put(item) | for item in fn(*args, **kwargs):
results_queue.put(item) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/geometry.py
def cartesian_to_spherical(
cartesian_vector,
eps = onp.float32(onp.finfo(onp.float32).tiny)
):
"""Converts cartesian to spherical coordinates.
Uses a right-handed coordinate system where z is up and y is right. The
spherical coordinates are defined by radius (r), inclination (theta)
β [0, Ο]) from fixed zenit direction (z) and azimuth (phi) β [0, 2Ο]) from
x-axis to y-axis.
We are using the phyiscal coordinate system as described here:
https://en.wikipedia.org/wiki/Spherical_coordinate_system.
Args:
cartesian_vector: (..., 3) Cartesian coordinates defined by (x, y, z).
eps: Epsilon used for safe_acos.
Returns:
Spherical coordinates as tuple of r, elevation (theta), azimuth (phi).
"""
x = cartesian_vector[Ellipsis, 0]
y = cartesian_vector[Ellipsis, 1]
z = cartesian_vector[Ellipsis, 2]
r = optax.safe_norm(cartesian_vector, min_norm=eps, axis=-1)
theta = spin_math.safe_acos(z / r)
phi = jnp.arctan2(y, x)
return r, theta, phi # pytype: disable=bad-return-type # jax-ndarray
# camp_zipnerf/internal/grid_utils.py
def trilerp(
values,
coordinates,
datastructure,
):
"""Sample from a hash or 3D voxel grid `values` using `coordinates`.
TODO(keunhong): Consider making datastructure an enum as well.
Args:
values: A (D,H,W,C) array containing values if datastructure == 'grid' or a
(N,C) array containing values if datastructure == 'hash'.
coordinates: A (..., 3) array containing coordinates to sample. The values
must be between 0 and the size of that dimension.
datastructure: Which datastructure to use, either 'grid' or 'hash'.
op_mode: Which resample op implementation to use, see `ResampleOpMode`.
Returns:
A (..., C) array containing the interpolated values at the given
coordinates.
Raises:
ValueError: If an invalid datastructure is passed.
"""
if datastructure == 'hash':
fn = hash_resample.hash_resample_3d
elif datastructure == 'grid':
# Note: unlike hash_resample_3d, resample_3d expects integer coordinate
# voxel centers, so we offset the coordinates by 0.5 here. We also
# flip the input coordinates since the convention used in `resample_3d`
# is for input point (x, y, z) to index grid_values[z, y, x]. We prefer the
# grid axis order to align with the Cartesian coordinate axes.
coordinates = jnp.flip(coordinates - 0.5, axis=-1)
def fn(v, c):
"""Add and remove two extra dims at the front of coord/output tensors."""
return resample.resample_3d(v, c[None, None])[0, 0]
else:
raise ValueError(
'datastructure must be either `grid` or `hash` but '
f'`{datastructure}` was given.'
)
coordinates_flat = coordinates.reshape(-1, coordinates.shape[-1])
if values.dtype != coordinates_flat.dtype:
coordinates_flat = coordinates_flat.astype(values.dtype)
result_flat = fn(values, coordinates_flat)
result = result_flat.reshape(coordinates.shape[:-1] + (values.shape[-1],))
return result
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
import math
from internal import math as math_lib
import jax.numpy as jnp
import numpy as np
def reflect(viewdirs, normals):
"""Reflect view directions about normals.
The reflection of a vector v about a unit vector n is a vector u such that
dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
equations is u = 2 dot(n, v) n - v.
Args:
viewdirs: [..., 3] array of view directions.
normals: [..., 3] array of normal directions (assumed to be unit vectors).
Returns:
[..., 3] array of reflection directions.
"""
return (
2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
- viewdirs
)
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
"""Normalize x to unit length along last axis.
Normalizing vectors is surprisingly tricky, because you have to address the
case where the denominator in the normalization is tiny or zero, in which case
gradients will explode. For this reason, we perform two normalizations: in the
forward pass, we clamp the denominator with ~1e-40, but in the backward pass
we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the
output of this function is unit norm (unless x is very very small) while
preventing exploding gradients.
Args:
x: The array of values to normalize.
grad_eps: The value to clip the squared norm by before division in the
backward pass.
Returns:
A normalized array x / ||x||, normalized along the last axis.
"""
tiny = jnp.finfo(jnp.float32).tiny
grad_eps = jnp.maximum(tiny, grad_eps)
denom_sq = jnp.sum(x**2, axis=-1, keepdims=True)
normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq))
normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq))
# Use `normal_val` in the forward pass but `normal_grad` in the backward pass.
normal = math_lib.override_gradient(normal_val, normal_grad)
return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal)
def compute_weighted_mae(weights, normals, normals_gt):
"""Compute weighted mean angular error, assuming normals are unit length."""
angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1))
return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum())
def generalized_binomial_coeff(a, k):
"""Compute generalized binomial coefficients."""
return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
"""Compute associated Legendre polynomial coefficients.
Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
(l, m)th associated Legendre polynomial, P_l^m(cos(theta)).
Args:
l: associated Legendre polynomial degree.
m: associated Legendre polynomial order.
k: power of cos(theta).
Returns:
A float, the coefficient of the term corresponding to the inputs.
"""
return (
(-1) ** m
* 2**l
* math.factorial(l)
/ math.factorial(k)
/ math.factorial(l - k - m)
* generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l)
)
def sph_harm_coeff(l, m, k):
"""Compute spherical harmonic coefficients."""
return np.sqrt(
(2.0 * l + 1.0)
* math.factorial(l - m)
/ (4.0 * np.pi * math.factorial(l + m))
) * assoc_legendre_coeff(l, m, k)
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
def generate_ide_fn(deg_view):
"""Generate integrated directional encoding (IDE) function.
This function returns a function that computes the integrated directional
encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating integrated directional encoding.
Raises:
ValueError: if deg_view is larger than 5.
"""
if deg_view > 5:
raise ValueError('Only deg_view of at most 5 is numerically stable.')
ml_array = get_ml_array(deg_view)
l_max = 2 ** (deg_view - 1)
# Create a matrix corresponding to ml_array holding all coefficients, which,
# when multiplied (from the right) by the z coordinate Vandermonde matrix,
# results in the z component of the encoding.
mat = np.zeros((l_max + 1, ml_array.shape[1]))
for i, (m, l) in enumerate(ml_array.T):
<fim_suffix>
def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[Ellipsis, 0:1]
y = xyz[Ellipsis, 1:2]
z = xyz[Ellipsis, 2:3]
# Compute z Vandermonde matrix.
vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1)
# Compute x+iy Vandermonde matrix.
vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)
# Get spherical harmonics.
sph_harms = vmxy * math_lib.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
# concentration parameter, kappa.
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
ide = sph_harms * jnp.exp(-sigma * kappa_inv)
# Split into real and imaginary parts and return
return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1)
return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
"""Generate directional encoding (DE) function.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating directional encoding.
"""
integrated_dir_enc_fn = generate_ide_fn(deg_view)
def dir_enc_fn(xyz):
"""Function returning directional encoding (DE)."""
return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1]))
return dir_enc_fn
def orientation_loss(w, n, v):
"""Orientation loss on weights `w`, normals `n`, and -view directions `v`."""
n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1)
return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1))
<fim_middle>for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k) | for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/linspline.py
def sorted_lookup(x, xp):
"""Lookup `x` at sorted locations `xp`."""
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
functools.partial(jnp.searchsorted, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx0 = jnp.maximum(idx - 1, 0)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
return idx0, idx1
# camp_zipnerf/internal/vis.py
def visualize_cmap(
value,
weight,
colormap,
lo=None,
hi=None,
percentile=99.0,
curve_fn=lambda x: x,
modulus=None,
matte_background=True,
):
"""Visualize a 1D image and a 1D weighting according to some colormap.
Args:
value: A 1D image.
weight: A weight map, in [0, 1].
colormap: A colormap function.
lo: The lower bound to use when rendering, if None then use a percentile.
hi: The upper bound to use when rendering, if None then use a percentile.
percentile: What percentile of the value map to crop to when automatically
generating `lo` and `hi`. Depends on `weight` as well as `value'.
curve_fn: A curve function that gets applied to `value`, `lo`, and `hi`
before the rest of visualization. Good choices: x, 1/(x+eps), log(x+eps).
modulus: If not None, mod the normalized value by `modulus`. Use (0, 1]. If
`modulus` is not None, `lo`, `hi` and `percentile` will have no effect.
matte_background: If True, matte the image over a checkerboard.
Returns:
A colormap rendering.
"""
# Identify the values that bound the middle of `value' according to `weight`.
lo_auto, hi_auto = weighted_percentile(
value, weight, [50 - percentile / 2, 50 + percentile / 2]
)
# If `lo` or `hi` are None, use the automatically-computed bounds above.
eps = jnp.finfo(jnp.float32).eps
lo = lo or (lo_auto - eps)
hi = hi or (hi_auto + eps)
# Curve all values.
value, lo, hi = [curve_fn(x) for x in [value, lo, hi]]
# Wrap the values around if requested.
if modulus:
value = jnp.mod(value, modulus) / modulus
else:
# Otherwise, just scale to [0, 1].
value = jnp.clip((value - jnp.minimum(lo, hi)) / jnp.abs(hi - lo), 0, 1)
value = jnp.nan_to_num(value)
if colormap:
colorized = colormap(value)[Ellipsis, :3]
else:
if value.shape[-1] != 3:
raise ValueError(f'value must have 3 channels but has {value.shape[-1]}')
colorized = value
return matte(colorized, weight) if matte_background else colorized
# camp_zipnerf/internal/camera_delta.py
def _create_points_from_contracted_frustum(
self,
camera: jaxcam.Camera,
rng: chex.PRNGKey,
sample_depth_contracted: bool = True,
) -> jnp.ndarray:
"""Samples points uniformly in the contracted frustum.
We first compute the contracted camera frustum by intersecting camera rays
with the bounding sphere (which has radius 2). This defines a frustum from
the near plane to infinity. We can then apply the inverse of the contraction
to the points to get the metric point samples.
Args:
camera: The camera used to compute the frustum.
rng: A PRNGKey used to sample points.
sample_depth_contracted: If True, sample the depth in the contracted
space. Otherwise, sample linearly in metric space.
Returns:
Points sampled uniformly in the contracted frustum.
"""
if self.precondition_far >= 2.0:
raise ValueError('Far plane must be <2 when using contracted planes.')
rng, key1, key2 = random.split(rng, 3)
pixels = (
random.uniform(key1, (self.precondition_num_points, 2))
* jnp.array([camera.image_size_x - 1, camera.image_size_y - 1])
+ 0.5
)
rays = jaxcam.pixels_to_rays(camera, pixels)
near_points = camera.position + rays * self.precondition_near
far_points = geometry.ray_sphere_intersection(
camera.position, rays, radius=self.precondition_far
)
s_dist = random.uniform(key2, (self.precondition_num_points, 1))
if sample_depth_contracted:
# Lerp between contracted near and far plane.
points = s_dist * far_points + (1 - s_dist) * near_points
points = coord.inv_contract(points)
else:
# Lerp between uncontracted near and far plane.
near_points = coord.inv_contract(near_points)
far_points = coord.inv_contract(far_points)
points = s_dist * far_points + (1 - s_dist) * near_points
return points
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
<fim_suffix>
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lortentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special casess.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1)) | for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1)) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/datasets.py
def peek(self):
"""Peek at the next training batch or test example without dequeuing it.
Returns:
batch: utils.Batch, contains `rays` and their associated metadata.
"""
x = copy.copy(self._queue.queue[0]) # Make a copy of front of queue.
if self.split == utils.DataSplit.TRAIN:
return utils.shard(x)
else:
return jax.device_put(x)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
<fim_suffix>
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>for item in fn(*args, **kwargs):
results_queue.put(item) | for item in fn(*args, **kwargs):
results_queue.put(item) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/quaternion.py
def from_axis_angle(
axis_angle, eps = jnp.finfo(jnp.float32).eps
):
"""Constructs a quaternion for the given axis/angle rotation.
Args:
axis_angle: A 3-vector where the direction is the axis of rotation and the
magnitude is the angle of rotation.
eps: A small number used for numerical stability around zero rotations.
Returns:
A quaternion encoding the same rotation.
"""
theta_squared = jnp.sum(axis_angle**2, axis=-1)
theta = _safe_sqrt(theta_squared)
half_theta = theta / 2.0
k = jnp.sin(half_theta) / theta
# Avoid evaluating sqrt when theta is close to zero.
k = jnp.where(theta_squared > eps**2, k, 0.5)
qw = jnp.where(theta_squared > eps**2, jnp.cos(half_theta), 1.0)
qx = axis_angle[0] * k
qy = axis_angle[1] * k
qz = axis_angle[2] * k
return jnp.squeeze(jnp.array([qx, qy, qz, qw]))
# camp_zipnerf/internal/grid_utils.py
def grid_sizes(self):
"""Returns the grid sizes."""
desired_num_scales = 1 + self.scale_supersample * onp.log2(
self.max_grid_size / self.min_grid_size
)
num_scales = int(onp.round(desired_num_scales))
if onp.abs(desired_num_scales - num_scales) > 1e-4:
raise ValueError(
'grid scale parameters are ('
+ f'min_grid_size={self.min_grid_size}, '
+ f'max_grid_size={self.max_grid_size}, '
+ f'scale_supersample={self.scale_supersample}), '
+ f'which yields a non-integer number of scales {desired_num_scales}.'
)
return onp.round(
onp.geomspace(
self.min_grid_size,
self.max_grid_size,
num_scales,
)
).astype(onp.int32)
# camp_zipnerf/internal/quaternion.py
def exp(q, eps = 1e-8):
"""Computes the quaternion exponential.
References:
https://en.wikipedia.org/wiki/Quaternion#Exponential,_logarithm,_and_power_functions
Args:
q: the quaternion in (x,y,z,w) format or (x,y,z) if is_pure is True.
eps: an epsilon value for numerical stability.
Returns:
The exponential of q.
"""
is_pure = q.shape[-1] == 3
if is_pure:
s = jnp.zeros_like(q[Ellipsis, -1:])
v = q
else:
v = im(q)
s = re(q)
norm_v = linalg.norm(v, axis=-1, keepdims=True)
exp_s = jnp.exp(s)
w = jnp.cos(norm_v)
xyz = jnp.sin(norm_v) * v / jnp.maximum(norm_v, eps * jnp.ones_like(norm_v))
return exp_s * jnp.concatenate((xyz, w), axis=-1)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for constructing geodesic polyhedron, which are used as a basis."""
import itertools
import numpy as np
def compute_sq_dist(mat0, mat1=None):
"""Compute the squared Euclidean distance between all pairs of columns."""
if mat1 is None:
mat1 = mat0
# Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y.
sq_norm0 = np.sum(mat0**2, 0)
sq_norm1 = np.sum(mat1**2, 0)
sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors.
return sq_dist
def compute_tesselation_weights(v):
"""Tesselate the vertices of a triangle by a factor of `v`."""
if v < 1:
raise ValueError(f'v {v} must be >= 1')
int_weights = []
for i in range(v + 1):
for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j)))
int_weights = np.array(int_weights)
weights = int_weights / v # Barycentric weights.
return weights
def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4):
"""Tesselate the vertices of a geodesic polyhedron.
Args:
base_verts: tensor of floats, the vertex coordinates of the geodesic.
base_faces: tensor of ints, the indices of the vertices of base_verts that
constitute eachface of the polyhedra.
v: int, the factor of the tesselation (v==1 is a no-op).
eps: float, a small value used to determine if two vertices are the same.
Returns:
verts: a tensor of floats, the coordinates of the tesselated vertices.
"""
if not isinstance(v, int):
raise ValueError(f'v {v} must an integer')
tri_weights = compute_tesselation_weights(v)
verts = []
<fim_suffix>
verts = np.concatenate(verts, 0)
sq_dist = compute_sq_dist(verts.T)
assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist])
unique = np.unique(assignment)
verts = verts[unique, :]
return verts
def generate_basis(
base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4
):
"""Generates a 3D basis by tesselating a geometric polyhedron.
Args:
base_shape: string, the name of the starting polyhedron, must be either
'tetrahedron', 'icosahedron' or 'octahedron'.
angular_tesselation: int, the number of times to tesselate the polyhedron,
must be >= 1 (a value of 1 is a no-op to the polyhedron).
remove_symmetries: bool, if True then remove the symmetric basis columns,
which is usually a good idea because otherwise projections onto the basis
will have redundant negative copies of each other.
eps: float, a small number used to determine symmetries.
Returns:
basis: a matrix with shape [3, n].
"""
if base_shape == 'tetrahedron':
verts = np.array([
(np.sqrt(8 / 9), 0, -1 / 3),
(-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3),
(-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3),
(0, 0, 1),
])
faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)])
elif base_shape == 'icosahedron':
a = (np.sqrt(5) + 1) / 2
verts = np.array([
(-1, 0, a),
(1, 0, a),
(-1, 0, -a),
(1, 0, -a),
(0, a, 1),
(0, a, -1),
(0, -a, 1),
(0, -a, -1),
(a, 1, 0),
(-a, 1, 0),
(a, -1, 0),
(-a, -1, 0),
]) / np.sqrt(a + 2)
faces = np.array([
(0, 4, 1),
(0, 9, 4),
(9, 5, 4),
(4, 5, 8),
(4, 8, 1),
(8, 10, 1),
(8, 3, 10),
(5, 3, 8),
(5, 2, 3),
(2, 7, 3),
(7, 10, 3),
(7, 6, 10),
(7, 11, 6),
(11, 0, 6),
(0, 1, 6),
(6, 1, 10),
(9, 0, 11),
(9, 11, 2),
(9, 2, 5),
(7, 2, 11),
])
elif base_shape == 'octahedron':
verts = np.array(
[(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)]
)
corners = np.array(list(itertools.product([-1, 1], repeat=3)))
pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2)
faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1)
else:
raise ValueError(f'base_shape {base_shape} not supported')
verts = tesselate_geodesic(verts, faces, angular_tesselation)
if remove_symmetries:
# Remove elements of `verts` that are reflections of each other.
match = compute_sq_dist(verts.T, -verts.T) < eps
verts = verts[~np.any(np.triu(match), axis=0), :]
basis = verts[:, ::-1]
return basis
<fim_middle>for base_face in base_faces:
new_verts = np.matmul(tri_weights, base_verts[base_face, :])
new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
verts.append(new_verts) | for base_face in base_faces:
new_verts = np.matmul(tri_weights, base_verts[base_face, :])
new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True))
verts.append(new_verts) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/geopoly.py
def compute_tesselation_weights(v):
"""Tesselate the vertices of a triangle by a factor of `v`."""
if v < 1:
raise ValueError(f'v {v} must be >= 1')
int_weights = []
for i in range(v + 1):
for j in range(v + 1 - i):
int_weights.append((i, j, v - (i + j)))
int_weights = np.array(int_weights)
weights = int_weights / v # Barycentric weights.
return weights
# camp_zipnerf/internal/stepfun.py
def lossfun_distortion(t, w):
"""Compute iint w[i] w[j] |t[i] - t[j]| di dj."""
utils.assert_valid_stepfun(t, w)
# The loss incurred between all pairs of intervals.
ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2
dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :])
loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1)
# The loss incurred within each individual interval with itself.
loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3
return loss_inter + loss_intra
# camp_zipnerf/internal/image_utils.py
def render_histogram(x, **kwargs):
"""Call pyplot's hist() and render it to a numpy buffer."""
fig = plt.figure()
fig.gca().hist(x, **kwargs)
fig.canvas.draw()
hw = fig.canvas.get_width_height()[::-1]
buf = fig.canvas.tostring_rgb()
array = np.frombuffer(buf, dtype=np.uint8).reshape(hw + (3,))
plt.close(fig)
return array
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
import math
from internal import math as math_lib
import jax.numpy as jnp
import numpy as np
def reflect(viewdirs, normals):
"""Reflect view directions about normals.
The reflection of a vector v about a unit vector n is a vector u such that
dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
equations is u = 2 dot(n, v) n - v.
Args:
viewdirs: [..., 3] array of view directions.
normals: [..., 3] array of normal directions (assumed to be unit vectors).
Returns:
[..., 3] array of reflection directions.
"""
return (
2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
- viewdirs
)
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
"""Normalize x to unit length along last axis.
Normalizing vectors is surprisingly tricky, because you have to address the
case where the denominator in the normalization is tiny or zero, in which case
gradients will explode. For this reason, we perform two normalizations: in the
forward pass, we clamp the denominator with ~1e-40, but in the backward pass
we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the
output of this function is unit norm (unless x is very very small) while
preventing exploding gradients.
Args:
x: The array of values to normalize.
grad_eps: The value to clip the squared norm by before division in the
backward pass.
Returns:
A normalized array x / ||x||, normalized along the last axis.
"""
tiny = jnp.finfo(jnp.float32).tiny
grad_eps = jnp.maximum(tiny, grad_eps)
denom_sq = jnp.sum(x**2, axis=-1, keepdims=True)
normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq))
normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq))
# Use `normal_val` in the forward pass but `normal_grad` in the backward pass.
normal = math_lib.override_gradient(normal_val, normal_grad)
return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal)
def compute_weighted_mae(weights, normals, normals_gt):
"""Compute weighted mean angular error, assuming normals are unit length."""
angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1))
return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum())
def generalized_binomial_coeff(a, k):
"""Compute generalized binomial coefficients."""
return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
"""Compute associated Legendre polynomial coefficients.
Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
(l, m)th associated Legendre polynomial, P_l^m(cos(theta)).
Args:
l: associated Legendre polynomial degree.
m: associated Legendre polynomial order.
k: power of cos(theta).
Returns:
A float, the coefficient of the term corresponding to the inputs.
"""
return (
(-1) ** m
* 2**l
* math.factorial(l)
/ math.factorial(k)
/ math.factorial(l - k - m)
* generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l)
)
def sph_harm_coeff(l, m, k):
"""Compute spherical harmonic coefficients."""
return np.sqrt(
(2.0 * l + 1.0)
* math.factorial(l - m)
/ (4.0 * np.pi * math.factorial(l + m))
) * assoc_legendre_coeff(l, m, k)
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
<fim_suffix>
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
def generate_ide_fn(deg_view):
"""Generate integrated directional encoding (IDE) function.
This function returns a function that computes the integrated directional
encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating integrated directional encoding.
Raises:
ValueError: if deg_view is larger than 5.
"""
if deg_view > 5:
raise ValueError('Only deg_view of at most 5 is numerically stable.')
ml_array = get_ml_array(deg_view)
l_max = 2 ** (deg_view - 1)
# Create a matrix corresponding to ml_array holding all coefficients, which,
# when multiplied (from the right) by the z coordinate Vandermonde matrix,
# results in the z component of the encoding.
mat = np.zeros((l_max + 1, ml_array.shape[1]))
for i, (m, l) in enumerate(ml_array.T):
for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k)
def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[Ellipsis, 0:1]
y = xyz[Ellipsis, 1:2]
z = xyz[Ellipsis, 2:3]
# Compute z Vandermonde matrix.
vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1)
# Compute x+iy Vandermonde matrix.
vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)
# Get spherical harmonics.
sph_harms = vmxy * math_lib.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
# concentration parameter, kappa.
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
ide = sph_harms * jnp.exp(-sigma * kappa_inv)
# Split into real and imaginary parts and return
return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1)
return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
"""Generate directional encoding (DE) function.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating directional encoding.
"""
integrated_dir_enc_fn = generate_ide_fn(deg_view)
def dir_enc_fn(xyz):
"""Function returning directional encoding (DE)."""
return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1]))
return dir_enc_fn
def orientation_loss(w, n, v):
"""Orientation loss on weights `w`, normals `n`, and -view directions `v`."""
n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1)
return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1))
<fim_middle>for m in range(l + 1):
ml_list.append((m, l)) | for m in range(l + 1):
ml_list.append((m, l)) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/datasets.py
def run(self):
while True:
self._queue.put(self._next_fn())
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
<fim_suffix>
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>for item in fn(*args, **kwargs):
results_queue.put(item) | for item in fn(*args, **kwargs):
results_queue.put(item) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/datasets.py
def peek(self):
"""Peek at the next training batch or test example without dequeuing it.
Returns:
batch: utils.Batch, contains `rays` and their associated metadata.
"""
x = copy.copy(self._queue.queue[0]) # Make a copy of front of queue.
if self.split == utils.DataSplit.TRAIN:
return utils.shard(x)
else:
return jax.device_put(x)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
<fim_suffix>
return result_fn
return decorator
<fim_middle>def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result() | def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result() | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/datasets.py
def peek(self):
"""Peek at the next training batch or test example without dequeuing it.
Returns:
batch: utils.Batch, contains `rays` and their associated metadata.
"""
x = copy.copy(self._queue.queue[0]) # Make a copy of front of queue.
if self.split == utils.DataSplit.TRAIN:
return utils.shard(x)
else:
return jax.device_put(x)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
<fim_suffix>
return decorator
<fim_middle>def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn | def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/spin_math.py
def safe_log(x,
*,
eps = jnp.finfo(jnp.float32).eps,
value_at_zero = jnp.finfo(jnp.float32).eps):
"""Computes a safe log that avoids evaluating at zero.
Args:
x: Input array.
eps: A small number to prevent NaNs.
value_at_zero: The value to clamp x to near zero. The return value will be
sqrt(value_at_zero)
Returns:
log(x) or log(value_at_zero) near zero.
"""
safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
return jnp.log(safe_x)
# camp_zipnerf/internal/spin_math.py
def safe_sqrt(x,
*,
eps = jnp.finfo(jnp.float32).eps,
value_at_zero = 0.0):
"""A safe version of jnp.sqrt that avoid evaluating at zero.
Note: sqrt(x) = sqrt(eps) = 3e-4 when x < eps = 1.19e-7.
Args:
x: The operand.
eps: A small number to prevent NaNs.
value_at_zero: The value to clamp x to near zero. The return value will be
sqrt(value_at_zero)
Returns:
The sqrt(x), or sqrt(value_at_zero) near zero.
"""
safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero))
return jnp.sqrt(safe_x)
# camp_zipnerf/internal/configs.py
def parse_call_def_partial(call_def):
"""Parses a function call definition partially.
Parses a CallDef, but instead of evaluating the function immediately,
return a partial function with the given kwargs.
Args:
call_def: A tuple containing (fn, kwargs).
Returns:
A partial function `fn(**kwargs)`.
"""
fn, kwargs = call_def
return functools.partial(fn, **kwargs)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
import math
from internal import math as math_lib
import jax.numpy as jnp
import numpy as np
def reflect(viewdirs, normals):
"""Reflect view directions about normals.
The reflection of a vector v about a unit vector n is a vector u such that
dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
equations is u = 2 dot(n, v) n - v.
Args:
viewdirs: [..., 3] array of view directions.
normals: [..., 3] array of normal directions (assumed to be unit vectors).
Returns:
[..., 3] array of reflection directions.
"""
return (
2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
- viewdirs
)
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
"""Normalize x to unit length along last axis.
Normalizing vectors is surprisingly tricky, because you have to address the
case where the denominator in the normalization is tiny or zero, in which case
gradients will explode. For this reason, we perform two normalizations: in the
forward pass, we clamp the denominator with ~1e-40, but in the backward pass
we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the
output of this function is unit norm (unless x is very very small) while
preventing exploding gradients.
Args:
x: The array of values to normalize.
grad_eps: The value to clip the squared norm by before division in the
backward pass.
Returns:
A normalized array x / ||x||, normalized along the last axis.
"""
tiny = jnp.finfo(jnp.float32).tiny
grad_eps = jnp.maximum(tiny, grad_eps)
denom_sq = jnp.sum(x**2, axis=-1, keepdims=True)
normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq))
normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq))
# Use `normal_val` in the forward pass but `normal_grad` in the backward pass.
normal = math_lib.override_gradient(normal_val, normal_grad)
return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal)
def compute_weighted_mae(weights, normals, normals_gt):
"""Compute weighted mean angular error, assuming normals are unit length."""
angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1))
return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum())
def generalized_binomial_coeff(a, k):
"""Compute generalized binomial coefficients."""
return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
"""Compute associated Legendre polynomial coefficients.
Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
(l, m)th associated Legendre polynomial, P_l^m(cos(theta)).
Args:
l: associated Legendre polynomial degree.
m: associated Legendre polynomial order.
k: power of cos(theta).
Returns:
A float, the coefficient of the term corresponding to the inputs.
"""
return (
(-1) ** m
* 2**l
* math.factorial(l)
/ math.factorial(k)
/ math.factorial(l - k - m)
* generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l)
)
def sph_harm_coeff(l, m, k):
"""Compute spherical harmonic coefficients."""
return np.sqrt(
(2.0 * l + 1.0)
* math.factorial(l - m)
/ (4.0 * np.pi * math.factorial(l + m))
) * assoc_legendre_coeff(l, m, k)
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
def generate_ide_fn(deg_view):
"""Generate integrated directional encoding (IDE) function.
This function returns a function that computes the integrated directional
encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating integrated directional encoding.
Raises:
ValueError: if deg_view is larger than 5.
"""
if deg_view > 5:
raise ValueError('Only deg_view of at most 5 is numerically stable.')
ml_array = get_ml_array(deg_view)
l_max = 2 ** (deg_view - 1)
# Create a matrix corresponding to ml_array holding all coefficients, which,
# when multiplied (from the right) by the z coordinate Vandermonde matrix,
# results in the z component of the encoding.
mat = np.zeros((l_max + 1, ml_array.shape[1]))
for i, (m, l) in enumerate(ml_array.T):
for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k)
def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[Ellipsis, 0:1]
y = xyz[Ellipsis, 1:2]
z = xyz[Ellipsis, 2:3]
# Compute z Vandermonde matrix.
vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1)
# Compute x+iy Vandermonde matrix.
vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)
# Get spherical harmonics.
sph_harms = vmxy * math_lib.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
# concentration parameter, kappa.
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
ide = sph_harms * jnp.exp(-sigma * kappa_inv)
# Split into real and imaginary parts and return
return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1)
return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
"""Generate directional encoding (DE) function.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating directional encoding.
"""
integrated_dir_enc_fn = generate_ide_fn(deg_view)
<fim_suffix>
return dir_enc_fn
def orientation_loss(w, n, v):
"""Orientation loss on weights `w`, normals `n`, and -view directions `v`."""
n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1)
return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1))
<fim_middle>def dir_enc_fn(xyz):
"""Function returning directional encoding (DE)."""
return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) | def dir_enc_fn(xyz):
"""Function returning directional encoding (DE)."""
return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/geometry.py
def cartesian_to_spherical(
cartesian_vector,
eps = onp.float32(onp.finfo(onp.float32).tiny)
):
"""Converts cartesian to spherical coordinates.
Uses a right-handed coordinate system where z is up and y is right. The
spherical coordinates are defined by radius (r), inclination (theta)
β [0, Ο]) from fixed zenit direction (z) and azimuth (phi) β [0, 2Ο]) from
x-axis to y-axis.
We are using the phyiscal coordinate system as described here:
https://en.wikipedia.org/wiki/Spherical_coordinate_system.
Args:
cartesian_vector: (..., 3) Cartesian coordinates defined by (x, y, z).
eps: Epsilon used for safe_acos.
Returns:
Spherical coordinates as tuple of r, elevation (theta), azimuth (phi).
"""
x = cartesian_vector[Ellipsis, 0]
y = cartesian_vector[Ellipsis, 1]
z = cartesian_vector[Ellipsis, 2]
r = optax.safe_norm(cartesian_vector, min_norm=eps, axis=-1)
theta = spin_math.safe_acos(z / r)
phi = jnp.arctan2(y, x)
return r, theta, phi # pytype: disable=bad-return-type # jax-ndarray
# camp_zipnerf/internal/grid_utils.py
def trilerp(
values,
coordinates,
datastructure,
):
"""Sample from a hash or 3D voxel grid `values` using `coordinates`.
TODO(keunhong): Consider making datastructure an enum as well.
Args:
values: A (D,H,W,C) array containing values if datastructure == 'grid' or a
(N,C) array containing values if datastructure == 'hash'.
coordinates: A (..., 3) array containing coordinates to sample. The values
must be between 0 and the size of that dimension.
datastructure: Which datastructure to use, either 'grid' or 'hash'.
op_mode: Which resample op implementation to use, see `ResampleOpMode`.
Returns:
A (..., C) array containing the interpolated values at the given
coordinates.
Raises:
ValueError: If an invalid datastructure is passed.
"""
if datastructure == 'hash':
fn = hash_resample.hash_resample_3d
elif datastructure == 'grid':
# Note: unlike hash_resample_3d, resample_3d expects integer coordinate
# voxel centers, so we offset the coordinates by 0.5 here. We also
# flip the input coordinates since the convention used in `resample_3d`
# is for input point (x, y, z) to index grid_values[z, y, x]. We prefer the
# grid axis order to align with the Cartesian coordinate axes.
coordinates = jnp.flip(coordinates - 0.5, axis=-1)
def fn(v, c):
"""Add and remove two extra dims at the front of coord/output tensors."""
return resample.resample_3d(v, c[None, None])[0, 0]
else:
raise ValueError(
'datastructure must be either `grid` or `hash` but '
f'`{datastructure}` was given.'
)
coordinates_flat = coordinates.reshape(-1, coordinates.shape[-1])
if values.dtype != coordinates_flat.dtype:
coordinates_flat = coordinates_flat.astype(values.dtype)
result_flat = fn(values, coordinates_flat)
result = result_flat.reshape(coordinates.shape[:-1] + (values.shape[-1],))
return result
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reflection directions and directional encodings."""
import math
from internal import math as math_lib
import jax.numpy as jnp
import numpy as np
def reflect(viewdirs, normals):
"""Reflect view directions about normals.
The reflection of a vector v about a unit vector n is a vector u such that
dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two
equations is u = 2 dot(n, v) n - v.
Args:
viewdirs: [..., 3] array of view directions.
normals: [..., 3] array of normal directions (assumed to be unit vectors).
Returns:
[..., 3] array of reflection directions.
"""
return (
2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
- viewdirs
)
def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps):
"""Normalize x to unit length along last axis.
Normalizing vectors is surprisingly tricky, because you have to address the
case where the denominator in the normalization is tiny or zero, in which case
gradients will explode. For this reason, we perform two normalizations: in the
forward pass, we clamp the denominator with ~1e-40, but in the backward pass
we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the
output of this function is unit norm (unless x is very very small) while
preventing exploding gradients.
Args:
x: The array of values to normalize.
grad_eps: The value to clip the squared norm by before division in the
backward pass.
Returns:
A normalized array x / ||x||, normalized along the last axis.
"""
tiny = jnp.finfo(jnp.float32).tiny
grad_eps = jnp.maximum(tiny, grad_eps)
denom_sq = jnp.sum(x**2, axis=-1, keepdims=True)
normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq))
normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq))
# Use `normal_val` in the forward pass but `normal_grad` in the backward pass.
normal = math_lib.override_gradient(normal_val, normal_grad)
return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal)
def compute_weighted_mae(weights, normals, normals_gt):
"""Compute weighted mean angular error, assuming normals are unit length."""
angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1))
return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum())
def generalized_binomial_coeff(a, k):
"""Compute generalized binomial coefficients."""
return np.prod(a - np.arange(k)) / math.factorial(k)
def assoc_legendre_coeff(l, m, k):
"""Compute associated Legendre polynomial coefficients.
Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the
(l, m)th associated Legendre polynomial, P_l^m(cos(theta)).
Args:
l: associated Legendre polynomial degree.
m: associated Legendre polynomial order.
k: power of cos(theta).
Returns:
A float, the coefficient of the term corresponding to the inputs.
"""
return (
(-1) ** m
* 2**l
* math.factorial(l)
/ math.factorial(k)
/ math.factorial(l - k - m)
* generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l)
)
def sph_harm_coeff(l, m, k):
"""Compute spherical harmonic coefficients."""
return np.sqrt(
(2.0 * l + 1.0)
* math.factorial(l - m)
/ (4.0 * np.pi * math.factorial(l + m))
) * assoc_legendre_coeff(l, m, k)
def get_ml_array(deg_view):
"""Create a list with all pairs of (l, m) values to use in the encoding."""
ml_list = []
for i in range(deg_view):
l = 2**i
# Only use nonnegative m values, later splitting real and imaginary parts.
for m in range(l + 1):
ml_list.append((m, l))
# Convert list into a numpy array.
ml_array = np.array(ml_list).T
return ml_array
def generate_ide_fn(deg_view):
"""Generate integrated directional encoding (IDE) function.
This function returns a function that computes the integrated directional
encoding from Equations 6-8 of arxiv.org/abs/2112.03907.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating integrated directional encoding.
Raises:
ValueError: if deg_view is larger than 5.
"""
if deg_view > 5:
raise ValueError('Only deg_view of at most 5 is numerically stable.')
ml_array = get_ml_array(deg_view)
l_max = 2 ** (deg_view - 1)
# Create a matrix corresponding to ml_array holding all coefficients, which,
# when multiplied (from the right) by the z coordinate Vandermonde matrix,
# results in the z component of the encoding.
mat = np.zeros((l_max + 1, ml_array.shape[1]))
for i, (m, l) in enumerate(ml_array.T):
for k in range(l - m + 1):
mat[k, i] = sph_harm_coeff(l, m, k)
<fim_suffix>
return integrated_dir_enc_fn
def generate_dir_enc_fn(deg_view):
"""Generate directional encoding (DE) function.
Args:
deg_view: number of spherical harmonics degrees to use.
Returns:
A function for evaluating directional encoding.
"""
integrated_dir_enc_fn = generate_ide_fn(deg_view)
def dir_enc_fn(xyz):
"""Function returning directional encoding (DE)."""
return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1]))
return dir_enc_fn
def orientation_loss(w, n, v):
"""Orientation loss on weights `w`, normals `n`, and -view directions `v`."""
n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1)
return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1))
<fim_middle>def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[Ellipsis, 0:1]
y = xyz[Ellipsis, 1:2]
z = xyz[Ellipsis, 2:3]
# Compute z Vandermonde matrix.
vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1)
# Compute x+iy Vandermonde matrix.
vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)
# Get spherical harmonics.
sph_harms = vmxy * math_lib.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
# concentration parameter, kappa.
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
ide = sph_harms * jnp.exp(-sigma * kappa_inv)
# Split into real and imaginary parts and return
return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) | def integrated_dir_enc_fn(xyz, kappa_inv):
"""Function returning integrated directional encoding (IDE).
Args:
xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at.
kappa_inv: [..., 1] reciprocal of the concentration parameter of the von
Mises-Fisher distribution.
Returns:
An array with the resulting IDE.
"""
x = xyz[Ellipsis, 0:1]
y = xyz[Ellipsis, 1:2]
z = xyz[Ellipsis, 2:3]
# Compute z Vandermonde matrix.
vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1)
# Compute x+iy Vandermonde matrix.
vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1)
# Get spherical harmonics.
sph_harms = vmxy * math_lib.matmul(vmz, mat)
# Apply attenuation function using the von Mises-Fisher distribution
# concentration parameter, kappa.
sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1)
ide = sph_harms * jnp.exp(-sigma * kappa_inv)
# Split into real and imaginary parts and return
return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/datasets.py
def peek(self):
"""Peek at the next training batch or test example without dequeuing it.
Returns:
batch: utils.Batch, contains `rays` and their associated metadata.
"""
x = copy.copy(self._queue.queue[0]) # Make a copy of front of queue.
if self.split == utils.DataSplit.TRAIN:
return utils.shard(x)
else:
return jax.device_put(x)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
<fim_suffix>
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False | def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/train_utils.py
def summarize_tree(fn, tree, ancestry=(), max_depth=3):
"""Flatten 'tree' while 'fn'-ing values and formatting keys like/this."""
stats = {}
for k, v in tree.items():
name = ancestry + (k,)
stats['/'.join(name)] = fn(v)
if hasattr(v, 'items') and len(ancestry) < (max_depth - 1):
stats.update(summarize_tree(fn, v, ancestry=name, max_depth=max_depth))
return stats
# camp_zipnerf/internal/datasets.py
def generate_flattened_ray_batch(
self, cam_idx, n_samples=10000
) -> utils.Batch:
"""Generate flattened ray batch for a specified camera in the dataset."""
images_flattened, indices_flattened = flatten_data(
self.images[cam_idx][None]
)
n_pixels = images_flattened.shape[0]
mask_indices = np.random.randint(0, n_pixels, (n_samples,))
cam_idx = indices_flattened[..., 0][mask_indices]
pix_x_int = indices_flattened[..., 1][mask_indices]
pix_y_int = indices_flattened[..., 2][mask_indices]
rgb = images_flattened[mask_indices]
return self._make_ray_batch(
pix_x_int, pix_y_int, cam_idx, lossmult=None, rgb=rgb
)
# camp_zipnerf/internal/datasets.py
def _split_indices_with_spline_keyframes(
self,
config: configs.Config,
all_indices: np.ndarray,
test_indices: np.ndarray,
all_image_names: List[str],
) -> Tuple[np.ndarray, np.ndarray]:
"""Constructs train, test split indices when spline keyframes are present.
When using keyframe-based spline paths, we want to avoid training on
keyframes for two reasons: to use them for validation and to minimize the
number of blurred pixels used in training (spline keyframes may be
blurred). We add splint keyframes to the test split here.
Args:
config: Config object.
all_indices: indices of all images available for train and test.
test_indices: indices of additional test images.
all_image_names: filenames for all images.
Returns:
train_indices: image indices to use in the train split.
test_indices: image indices to use in the test split.
"""
def _sorted_union(subsets):
result = set()
for subset in subsets:
result = result.union(subset)
return list(sorted(result))
def _sorted_complement(superset, subset):
return list(sorted(set(superset) - set(subset)))
# Identify all sources for keyframes.
spline_keyframe_sources = []
if config.render_spline_keyframes:
print(
'Adding images from config.render_spline_keyframes to test '
f'split: {config.render_spline_keyframes}'
)
spline_keyframe_sources.append(config.render_spline_keyframes)
if config.render_spline_keyframes_choices:
print(
'Adding images from config.render_spline_keyframes_choices '
f'to test split: {config.render_spline_keyframes_choices}'
)
spline_keyframe_sources.extend(
config.render_spline_keyframes_choices.split(',')
)
spline_keyframe_indices = _sorted_union([
camera_utils.identify_file_indices(source, all_image_names)
for source in spline_keyframe_sources
])
test_indices = _sorted_union([test_indices, spline_keyframe_indices])
train_indices = _sorted_complement(all_indices, test_indices)
return np.array(train_indices), np.array(test_indices)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
<fim_suffix>
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lortentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special casess.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range)) | def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range)) | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/image_io.py
def load_exif(pth):
"""Load EXIF data for an image."""
with utils.open_file(pth, 'rb') as f:
image_pil = Image.open(f)
exif_pil = image_pil._getexif() # pylint: disable=protected-access
if exif_pil is not None:
exif = {
ExifTags.TAGS[k]: v for k, v in exif_pil.items() if k in ExifTags.TAGS
}
else:
exif = {}
return exif
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
<fim_suffix>
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False | def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/datasets.py
def run(self):
while True:
self._queue.put(self._next_fn())
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
<fim_suffix>
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False | def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/train_utils.py
def summarize_tree(fn, tree, ancestry=(), max_depth=3):
"""Flatten 'tree' while 'fn'-ing values and formatting keys like/this."""
stats = {}
for k, v in tree.items():
name = ancestry + (k,)
stats['/'.join(name)] = fn(v)
if hasattr(v, 'items') and len(ancestry) < (max_depth - 1):
stats.update(summarize_tree(fn, v, ancestry=name, max_depth=max_depth))
return stats
# camp_zipnerf/internal/datasets.py
def generate_flattened_ray_batch(
self, cam_idx, n_samples=10000
) -> utils.Batch:
"""Generate flattened ray batch for a specified camera in the dataset."""
images_flattened, indices_flattened = flatten_data(
self.images[cam_idx][None]
)
n_pixels = images_flattened.shape[0]
mask_indices = np.random.randint(0, n_pixels, (n_samples,))
cam_idx = indices_flattened[..., 0][mask_indices]
pix_x_int = indices_flattened[..., 1][mask_indices]
pix_y_int = indices_flattened[..., 2][mask_indices]
rgb = images_flattened[mask_indices]
return self._make_ray_batch(
pix_x_int, pix_y_int, cam_idx, lossmult=None, rgb=rgb
)
# camp_zipnerf/internal/datasets.py
def _split_indices_with_spline_keyframes(
self,
config: configs.Config,
all_indices: np.ndarray,
test_indices: np.ndarray,
all_image_names: List[str],
) -> Tuple[np.ndarray, np.ndarray]:
"""Constructs train, test split indices when spline keyframes are present.
When using keyframe-based spline paths, we want to avoid training on
keyframes for two reasons: to use them for validation and to minimize the
number of blurred pixels used in training (spline keyframes may be
blurred). We add splint keyframes to the test split here.
Args:
config: Config object.
all_indices: indices of all images available for train and test.
test_indices: indices of additional test images.
all_image_names: filenames for all images.
Returns:
train_indices: image indices to use in the train split.
test_indices: image indices to use in the test split.
"""
def _sorted_union(subsets):
result = set()
for subset in subsets:
result = result.union(subset)
return list(sorted(result))
def _sorted_complement(superset, subset):
return list(sorted(set(superset) - set(subset)))
# Identify all sources for keyframes.
spline_keyframe_sources = []
if config.render_spline_keyframes:
print(
'Adding images from config.render_spline_keyframes to test '
f'split: {config.render_spline_keyframes}'
)
spline_keyframe_sources.append(config.render_spline_keyframes)
if config.render_spline_keyframes_choices:
print(
'Adding images from config.render_spline_keyframes_choices '
f'to test split: {config.render_spline_keyframes_choices}'
)
spline_keyframe_sources.extend(
config.render_spline_keyframes_choices.split(',')
)
spline_keyframe_indices = _sorted_union([
camera_utils.identify_file_indices(source, all_image_names)
for source in spline_keyframe_sources
])
test_indices = _sorted_union([test_indices, spline_keyframe_indices])
train_indices = _sorted_complement(all_indices, test_indices)
return np.array(train_indices), np.array(test_indices)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
<fim_suffix>
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lortentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special casess.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot | def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/image_io.py
def load_exif(pth):
"""Load EXIF data for an image."""
with utils.open_file(pth, 'rb') as f:
image_pil = Image.open(f)
exif_pil = image_pil._getexif() # pylint: disable=protected-access
if exif_pil is not None:
exif = {
ExifTags.TAGS[k]: v for k, v in exif_pil.items() if k in ExifTags.TAGS
}
else:
exif = {}
return exif
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
<fim_suffix>
return result_fn
return decorator
<fim_middle>def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result() | def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result() | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/image_io.py
def load_exif(pth):
"""Load EXIF data for an image."""
with utils.open_file(pth, 'rb') as f:
image_pil = Image.open(f)
exif_pil = image_pil._getexif() # pylint: disable=protected-access
if exif_pil is not None:
exif = {
ExifTags.TAGS[k]: v for k, v in exif_pil.items() if k in ExifTags.TAGS
}
else:
exif = {}
return exif
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
<fim_suffix>
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>try:
for item in fn(*args, **kwargs):
results_queue.put(item) | try:
for item in fn(*args, **kwargs):
results_queue.put(item) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/datasets.py
def run(self):
while True:
self._queue.put(self._next_fn())
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
<fim_suffix>
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0) | try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/image_io.py
def load_exif(pth):
"""Load EXIF data for an image."""
with utils.open_file(pth, 'rb') as f:
image_pil = Image.open(f)
exif_pil = image_pil._getexif() # pylint: disable=protected-access
if exif_pil is not None:
exif = {
ExifTags.TAGS[k]: v for k, v in exif_pil.items() if k in ExifTags.TAGS
}
else:
exif = {}
return exif
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
<fim_suffix>
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0) | try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/datasets.py
def run(self):
while True:
self._queue.put(self._next_fn())
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
<fim_suffix>
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>try:
for item in fn(*args, **kwargs):
results_queue.put(item) | try:
for item in fn(*args, **kwargs):
results_queue.put(item) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/datasets.py
def peek(self):
"""Peek at the next training batch or test example without dequeuing it.
Returns:
batch: utils.Batch, contains `rays` and their associated metadata.
"""
x = copy.copy(self._queue.queue[0]) # Make a copy of front of queue.
if self.split == utils.DataSplit.TRAIN:
return utils.shard(x)
else:
return jax.device_put(x)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
<fim_suffix>
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0) | try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/datasets.py
def peek(self):
"""Peek at the next training batch or test example without dequeuing it.
Returns:
batch: utils.Batch, contains `rays` and their associated metadata.
"""
x = copy.copy(self._queue.queue[0]) # Make a copy of front of queue.
if self.split == utils.DataSplit.TRAIN:
return utils.shard(x)
else:
return jax.device_put(x)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
<fim_suffix>
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>try:
for item in fn(*args, **kwargs):
results_queue.put(item) | try:
for item in fn(*args, **kwargs):
results_queue.put(item) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/datasets.py
def run(self):
while True:
self._queue.put(self._next_fn())
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
<fim_suffix>
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value | while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value | WHILE | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/image_io.py
def load_exif(pth):
"""Load EXIF data for an image."""
with utils.open_file(pth, 'rb') as f:
image_pil = Image.open(f)
exif_pil = image_pil._getexif() # pylint: disable=protected-access
if exif_pil is not None:
exif = {
ExifTags.TAGS[k]: v for k, v in exif_pil.items() if k in ExifTags.TAGS
}
else:
exif = {}
return exif
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
<fim_suffix>
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value | while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value | WHILE | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/datasets.py
def peek(self):
"""Peek at the next training batch or test example without dequeuing it.
Returns:
batch: utils.Batch, contains `rays` and their associated metadata.
"""
x = copy.copy(self._queue.queue[0]) # Make a copy of front of queue.
if self.split == utils.DataSplit.TRAIN:
return utils.shard(x)
else:
return jax.device_put(x)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
<fim_suffix>
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value | while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
except queue.Empty:
continue
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value | WHILE | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/image_io.py
def load_exif(pth):
"""Load EXIF data for an image."""
with utils.open_file(pth, 'rb') as f:
image_pil = Image.open(f)
exif_pil = image_pil._getexif() # pylint: disable=protected-access
if exif_pil is not None:
exif = {
ExifTags.TAGS[k]: v for k, v in exif_pil.items() if k in ExifTags.TAGS
}
else:
exif = {}
return exif
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
<fim_suffix>
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>except queue.Empty:
continue | except queue.Empty:
continue | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/stepfun.py
def integrate_weights(w):
"""Compute the cumulative sum of w, assuming all weight vectors sum to 1.
The output's size on the last dimension is one greater than that of the input,
because we're computing the integral corresponding to the endpoints of a step
function, not the integral of the interior/bin values.
Args:
w: Tensor, which will be integrated along the last axis. This is assumed to
sum to 1 along the last axis, and this function will (silently) break if
that is not the case.
Returns:
cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1
"""
cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1))
shape = cw.shape[:-1] + (1,)
# Ensure that the CDF starts with exactly 0 and ends with exactly 1.
cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1)
return cw0
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/datasets.py
def peek(self):
"""Peek at the next training batch or test example without dequeuing it.
Returns:
batch: utils.Batch, contains `rays` and their associated metadata.
"""
x = copy.copy(self._queue.queue[0]) # Make a copy of front of queue.
if self.split == utils.DataSplit.TRAIN:
return utils.shard(x)
else:
return jax.device_put(x)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
<fim_suffix>
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>except queue.Empty:
continue | except queue.Empty:
continue | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/utils.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/datasets.py
def __init__(self, dataset: Dataset):
super().__init__()
self._queue = queue.Queue(3) # Set prefetch buffer to 3 batches.
self.daemon = True # Sets parent Thread to be a daemon.
self.split = dataset.split
self.dataset = dataset
self._test_camera_idx = 0
self._n_examples = dataset._n_examples
# Seed the queue with one batch to avoid race condition.
if self.split == utils.DataSplit.TRAIN:
# TODO(bmild): Move _next_train here as well.
self._next_fn = dataset._next_train
else:
self._next_fn = self._next_test
self._queue.put(self._next_fn())
self.start()
# camp_zipnerf/internal/camera_utils.py
def identify_file_indices(
dir_or_text_file, file_names
):
"""Computes indices for a subset of files out of a larger list."""
# Load file names.
subset_names = identify_file_names(dir_or_text_file)
# COLMAP sometimes doesn't reconstruct all images, which results in some files
# being missing.
if not set(subset_names).issubset(file_names):
subset_names_missing_from_file_names = set(subset_names) - set(file_names)
logging.warning(
'Some files from subset are missing in the file names:\n%s',
' '.join(str(x) for x in subset_names_missing_from_file_names),
)
missing_subset_names_threshold = len(
subset_names_missing_from_file_names
) / len(subset_names)
if (
missing_subset_names_threshold
> _IDENTIFY_FILE_INDICES_MISSING_FRACTION_ERROR_THRESHOLD
):
raise ValueError(
f'{missing_subset_names_threshold*100}% of subset files is missing'
f' from file_names: {subset_names_missing_from_file_names}'
)
file_names_set = set(file_names)
# Get indices corresponding to the subset filenames. Ensure that the order
# used in subset_names is preserved.
indices = [file_names.index(n) for n in subset_names if n in file_names_set]
indices = np.array(indices)
return indices
# camp_zipnerf/internal/datasets.py
def run(self):
while True:
self._queue.put(self._next_fn())
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions."""
import concurrent
import enum
import os
import queue
import threading
import time
from typing import Any, Callable, Iterable, Optional, TypeVar, Union
from absl import logging
import flax
import jax
from jax import random
import jax.numpy as jnp
import numpy as np
_Array = Union[np.ndarray, jnp.ndarray]
@flax.struct.dataclass
class Rays:
"""All tensors must have the same num_dims and first n-1 dims must match.
This dataclass contains spatially meaningful quantities associated with
the ray that can be calculated by the function casting the ray, as well as
all metadata necessary for the ray to be rendered by the Model class.
"""
origins: Optional[_Array] = None
directions: Optional[_Array] = None
viewdirs: Optional[_Array] = None
radii: Optional[_Array] = None
imageplane: Optional[_Array] = None
pixels: Optional[_Array] = None
lossmult: Optional[_Array] = None
near: Optional[_Array] = None
far: Optional[_Array] = None
cam_idx: Optional[_Array] = None
exposure_idx: Optional[_Array] = None
exposure_values: Optional[_Array] = None
device_idx: Optional[_Array] = None
def generate_random_rays(
rng,
n,
origin_lo,
origin_hi,
radius_lo,
radius_hi,
near_lo,
near_hi,
far_lo,
far_hi,
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
"""Generate a random Rays datastructure."""
key, rng = random.split(rng)
origins = random.uniform(
key, shape=[n, 3], minval=origin_lo, maxval=origin_hi
)
key, rng = random.split(rng)
directions = random.normal(key, shape=[n, 3])
directions /= jnp.sqrt(
jnp.maximum(
jnp.finfo(jnp.float32).tiny,
jnp.sum(directions**2, axis=-1, keepdims=True),
)
)
viewdirs = directions
key, rng = random.split(rng)
radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi)
key, rng = random.split(rng)
near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi)
key, rng = random.split(rng)
far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi)
imageplane = jnp.zeros([n, 2])
lossmult = jnp.zeros([n, 1])
key, rng = random.split(rng)
pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024)
int_scalar = jnp.int32(jnp.zeros([n, 1]))
exposure_kwargs = {}
if include_exposure_idx:
exposure_kwargs['exposure_idx'] = int_scalar
if include_exposure_values:
exposure_kwargs['exposure_values'] = jnp.zeros([n, 1])
if include_device_idx:
exposure_kwargs['device_idx'] = int_scalar
random_rays = Rays(
origins=origins,
directions=directions,
viewdirs=viewdirs,
radii=radii,
imageplane=imageplane,
pixels=pixels,
lossmult=lossmult,
near=near,
far=far,
cam_idx=int_scalar,
**exposure_kwargs,
)
return random_rays
# Dummy Rays object that can be used to initialize NeRF model.
def dummy_rays(
include_exposure_idx = False,
include_exposure_values = False,
include_device_idx = False,
):
return generate_random_rays(
random.PRNGKey(0),
n=100,
origin_lo=-1.5,
origin_hi=1.5,
radius_lo=1e-5,
radius_hi=1e-3,
near_lo=0.0,
near_hi=1.0,
far_lo=10,
far_hi=10000,
include_exposure_idx=include_exposure_idx,
include_exposure_values=include_exposure_values,
include_device_idx=include_device_idx,
)
@flax.struct.dataclass
class Batch:
"""Data batch for NeRF training or testing.
This dataclass contains rays and also per-pixel data that is necessary for
computing the loss term or evaluating metrics but NOT necessary for rendering.
"""
rays: Rays
rgb: Optional[_Array] = None
disps: Optional[_Array] = None
normals: Optional[_Array] = None
alphas: Optional[_Array] = None
masks: Optional[_Array] = None
class DataSplit(enum.Enum):
"""Dataset split."""
TRAIN = 'train'
TEST = 'test'
class BatchingMethod(enum.Enum):
"""Draw rays randomly from a single image or all images, in each batch."""
ALL_IMAGES = 'all_images'
SINGLE_IMAGE = 'single_image'
def open_file(pth, mode='r'):
return open(pth, mode=mode)
def file_exists(pth):
return os.path.exists(pth)
def listdir(pth):
return os.listdir(pth)
def isdir(pth):
return os.path.isdir(pth)
def makedirs(pth):
if not file_exists(pth):
os.makedirs(pth)
def device_is_tpu():
return jax.local_devices()[0].platform == 'tpu'
def shard(xs):
"""Split data into shards for multiple devices along the first dimension."""
return jax.tree_util.tree_map(
lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs
)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
if padding > 0:
y = y[:-padding]
return y
def load_npy(pth):
"""Load an numpy array cast to float32."""
with open_file(pth, 'rb') as f:
x = np.load(f).astype(np.float32)
return x
def assert_valid_stepfun(t, y):
"""Assert that step function (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1] + 1:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a step function.'
)
def assert_valid_linspline(t, y):
"""Assert that piecewise linear spline (t, y) has a valid shape."""
if t.shape[-1] != y.shape[-1]:
raise ValueError(
f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.'
)
_FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]])
def iterate_in_separate_thread(
queue_size = 3,
):
"""Decorator factory that iterates a function in a separate thread.
Args:
queue_size: Keep at most queue_size elements in memory.
Returns:
Decorator that will iterate a function in a separate thread.
"""
def decorator(
fn,
):
def result_fn(*args, **kwargs):
results_queue = queue.Queue(queue_size)
populating_data = True
populating_data_lock = threading.Lock()
def thread_fn():
# Mark has_data as a variable that's outside of thread_fn
# Otherwise, `populating_data = True` creates a local variable
nonlocal populating_data
try:
for item in fn(*args, **kwargs):
results_queue.put(item)
finally:
# Set populating_data to False regardless of exceptions to stop
# iterations
with populating_data_lock:
populating_data = False
# Use executor + futures instead of Thread to propagate exceptions
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
thread_fn_future = executor.submit(thread_fn)
while True:
with populating_data_lock:
if not populating_data and results_queue.empty():
break
get_start = time.time()
try:
# Set timeout to allow for exceptions to be propagated.
next_value = results_queue.get(timeout=1.0)
<fim_suffix>
logging.info('Got data in %0.3fs', time.time() - get_start)
yield next_value
# Thread exception will be raised here
thread_fn_future.result()
return result_fn
return decorator
<fim_middle>except queue.Empty:
continue | except queue.Empty:
continue | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/train_utils.py
def summarize_tree(fn, tree, ancestry=(), max_depth=3):
"""Flatten 'tree' while 'fn'-ing values and formatting keys like/this."""
stats = {}
for k, v in tree.items():
name = ancestry + (k,)
stats['/'.join(name)] = fn(v)
if hasattr(v, 'items') and len(ancestry) < (max_depth - 1):
stats.update(summarize_tree(fn, v, ancestry=name, max_depth=max_depth))
return stats
# camp_zipnerf/internal/datasets.py
def generate_flattened_ray_batch(
self, cam_idx, n_samples=10000
) -> utils.Batch:
"""Generate flattened ray batch for a specified camera in the dataset."""
images_flattened, indices_flattened = flatten_data(
self.images[cam_idx][None]
)
n_pixels = images_flattened.shape[0]
mask_indices = np.random.randint(0, n_pixels, (n_samples,))
cam_idx = indices_flattened[..., 0][mask_indices]
pix_x_int = indices_flattened[..., 1][mask_indices]
pix_y_int = indices_flattened[..., 2][mask_indices]
rgb = images_flattened[mask_indices]
return self._make_ray_batch(
pix_x_int, pix_y_int, cam_idx, lossmult=None, rgb=rgb
)
# camp_zipnerf/internal/datasets.py
def _split_indices_with_spline_keyframes(
self,
config: configs.Config,
all_indices: np.ndarray,
test_indices: np.ndarray,
all_image_names: List[str],
) -> Tuple[np.ndarray, np.ndarray]:
"""Constructs train, test split indices when spline keyframes are present.
When using keyframe-based spline paths, we want to avoid training on
keyframes for two reasons: to use them for validation and to minimize the
number of blurred pixels used in training (spline keyframes may be
blurred). We add splint keyframes to the test split here.
Args:
config: Config object.
all_indices: indices of all images available for train and test.
test_indices: indices of additional test images.
all_image_names: filenames for all images.
Returns:
train_indices: image indices to use in the train split.
test_indices: image indices to use in the test split.
"""
def _sorted_union(subsets):
result = set()
for subset in subsets:
result = result.union(subset)
return list(sorted(result))
def _sorted_complement(superset, subset):
return list(sorted(set(superset) - set(subset)))
# Identify all sources for keyframes.
spline_keyframe_sources = []
if config.render_spline_keyframes:
print(
'Adding images from config.render_spline_keyframes to test '
f'split: {config.render_spline_keyframes}'
)
spline_keyframe_sources.append(config.render_spline_keyframes)
if config.render_spline_keyframes_choices:
print(
'Adding images from config.render_spline_keyframes_choices '
f'to test split: {config.render_spline_keyframes_choices}'
)
spline_keyframe_sources.extend(
config.render_spline_keyframes_choices.split(',')
)
spline_keyframe_indices = _sorted_union([
camera_utils.identify_file_indices(source, all_image_names)
for source in spline_keyframe_sources
])
test_indices = _sorted_union([test_indices, spline_keyframe_indices])
train_indices = _sorted_complement(all_indices, test_indices)
return np.array(train_indices), np.array(test_indices)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
<fim_suffix>
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
@safe_fn.defjvp
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lortentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special casess.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>@jax.custom_jvp | @jax.custom_jvp | ANNOTATION | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>camp_zipnerf/internal/math.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# camp_zipnerf/internal/train_utils.py
def summarize_tree(fn, tree, ancestry=(), max_depth=3):
"""Flatten 'tree' while 'fn'-ing values and formatting keys like/this."""
stats = {}
for k, v in tree.items():
name = ancestry + (k,)
stats['/'.join(name)] = fn(v)
if hasattr(v, 'items') and len(ancestry) < (max_depth - 1):
stats.update(summarize_tree(fn, v, ancestry=name, max_depth=max_depth))
return stats
# camp_zipnerf/internal/datasets.py
def generate_flattened_ray_batch(
self, cam_idx, n_samples=10000
) -> utils.Batch:
"""Generate flattened ray batch for a specified camera in the dataset."""
images_flattened, indices_flattened = flatten_data(
self.images[cam_idx][None]
)
n_pixels = images_flattened.shape[0]
mask_indices = np.random.randint(0, n_pixels, (n_samples,))
cam_idx = indices_flattened[..., 0][mask_indices]
pix_x_int = indices_flattened[..., 1][mask_indices]
pix_y_int = indices_flattened[..., 2][mask_indices]
rgb = images_flattened[mask_indices]
return self._make_ray_batch(
pix_x_int, pix_y_int, cam_idx, lossmult=None, rgb=rgb
)
# camp_zipnerf/internal/datasets.py
def _split_indices_with_spline_keyframes(
self,
config: configs.Config,
all_indices: np.ndarray,
test_indices: np.ndarray,
all_image_names: List[str],
) -> Tuple[np.ndarray, np.ndarray]:
"""Constructs train, test split indices when spline keyframes are present.
When using keyframe-based spline paths, we want to avoid training on
keyframes for two reasons: to use them for validation and to minimize the
number of blurred pixels used in training (spline keyframes may be
blurred). We add splint keyframes to the test split here.
Args:
config: Config object.
all_indices: indices of all images available for train and test.
test_indices: indices of additional test images.
all_image_names: filenames for all images.
Returns:
train_indices: image indices to use in the train split.
test_indices: image indices to use in the test split.
"""
def _sorted_union(subsets):
result = set()
for subset in subsets:
result = result.union(subset)
return list(sorted(result))
def _sorted_complement(superset, subset):
return list(sorted(set(superset) - set(subset)))
# Identify all sources for keyframes.
spline_keyframe_sources = []
if config.render_spline_keyframes:
print(
'Adding images from config.render_spline_keyframes to test '
f'split: {config.render_spline_keyframes}'
)
spline_keyframe_sources.append(config.render_spline_keyframes)
if config.render_spline_keyframes_choices:
print(
'Adding images from config.render_spline_keyframes_choices '
f'to test split: {config.render_spline_keyframes_choices}'
)
spline_keyframe_sources.extend(
config.render_spline_keyframes_choices.split(',')
)
spline_keyframe_indices = _sorted_union([
camera_utils.identify_file_indices(source, all_image_names)
for source in spline_keyframe_sources
])
test_indices = _sorted_union([test_indices, spline_keyframe_indices])
train_indices = _sorted_complement(all_indices, test_indices)
return np.array(train_indices), np.array(test_indices)
"""
# coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mathy utility functions."""
import functools
import jax
import jax.numpy as jnp
import numpy as np
tiny_val = np.float32(np.finfo(np.float32).tiny)
min_val = np.float32(np.finfo(np.float32).min)
max_val = np.float32(np.finfo(np.float32).max)
def laplace_cdf(x, beta):
alpha = 1 / beta
return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1))
def scaled_softplus(x, scale=100.0):
return (1.0 / scale) * jax.nn.softplus(scale * x)
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
def unstack(x, axis=0):
return tuple(
jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis)
)
@jax.custom_jvp
def plus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf)
)
@jax.custom_jvp
def minus_eps(x):
return jnp.where(
jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf)
)
@plus_eps.defjvp
def plus_eps_jvp(primals, tangents):
"""Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return plus_eps(*primals), tangents[0]
@minus_eps.defjvp
def minus_eps_jvp(primals, tangents):
"""Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined)."""
return minus_eps(*primals), tangents[0]
@jax.custom_jvp
def expm1(x):
"""jnp.expm1() has inaccurate gradients when x << 0, this doesn't."""
return jnp.expm1(x)
@expm1.defjvp
def expm1_jvp(primals, tangents):
return expm1(*primals), tangents[0] * jnp.exp(primals[0])
def safe_trig_helper(x, fn, t=100 * jnp.pi):
"""Helper function used by safe_cos/safe_sin: mods x before sin()/cos()."""
return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t)))
def safe_cos(x):
"""jnp.cos() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.cos)
def safe_sin(x):
"""jnp.sin() on a TPU may NaN out for large values."""
return safe_trig_helper(x, jnp.sin)
@jax.custom_vjp
def safe_arctan2(x1, x2):
return safe_arctan2_fwd(x1, x2)[0]
def safe_arctan2_fwd(x1, x2):
return jnp.arctan2(x1, x2), (x1, x2)
def safe_arctan2_bwd(res, g):
x1, x2 = res
denom = remove_zero(x1**2 + x2**2)
d1 = g * (x2 / denom)
d2 = g * (-x1 / denom)
return d1, d2
safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd)
def generate_clip_nograd_fn(a_min, a_max):
"""Generates a function that clips to [a_min, a_max] with no grad effects."""
@jax.custom_jvp
def clip_nograd(a):
"""Clamps `a` from above and below."""
return jnp.clip(a, a_min, a_max)
@clip_nograd.defjvp
def clip_nograd_jvp(primals, tangents):
"""Override clips()'s gradient to be a no-op."""
return clip_nograd(primals[0]), tangents[0]
return clip_nograd
clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val)
clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val)
def clip_pos(x):
"""Clamps `x` from below to be positive."""
return jnp.maximum(tiny_val, x)
def safe_sign(x):
"""jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0."""
return jnp.where(x < 0, -1, +1)
def remove_zero(x):
"""Shifts `x` away from 0."""
return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x)
def clip_finite(x):
return jnp.clip(x, min_val, max_val)
@jax.custom_vjp
def safe_div(n, d):
"""Divide `n` by `d` but the value and gradient never nan out."""
return safe_div_fwd(n, d)[0]
def safe_div_fwd(n, d):
r = jnp.clip(n / remove_zero(d), min_val, max_val)
return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r)
def safe_div_bwd(res, g):
d, r = res
dn = jnp.clip(g / remove_zero(d), min_val, max_val)
dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val)
return dn, dd
safe_div.defvjp(safe_div_fwd, safe_div_bwd)
def generate_safe_fn(fn, grad_fn, x_range):
"""Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes."""
@jax.custom_jvp
def safe_fn(x):
"""fn() with clipped inputs."""
return fn(jnp.clip(x, *x_range))
<fim_suffix>
def safe_fn_jvp(primals, tangents):
"""Backpropagate using the gradient and clipped inputs."""
(x,) = primals
(x_dot,) = tangents
y = safe_fn(x)
y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot)
return y, y_dot
return safe_fn
# These safe_* functions need to be wrapped in no-op function definitions for
# gin to recognize them, otherwise they could just be calls to generate_safe_fn.
def safe_log(x):
return generate_safe_fn(
jnp.log,
lambda x, _, x_dot: x_dot / x,
(tiny_val, max_val),
)(x)
def safe_exp(x):
return generate_safe_fn(
jnp.exp,
lambda _, y, x_dot: y * x_dot,
(min_val, np.nextafter(np.log(max_val), np.float32(0))),
)(x)
def safe_sqrt(x):
return generate_safe_fn(
jnp.sqrt,
lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)),
(0, max_val),
)(x)
def safe_log1p(x):
return generate_safe_fn(
jnp.log1p,
lambda x, _, x_dot: x_dot / (1 + x),
(np.nextafter(np.float32(-1), np.float32(0)), max_val),
)(x)
def safe_expm1(x):
return generate_safe_fn(
expm1, # Note that we wrap around our more accurate expm1.
lambda x, _, x_dot: jnp.exp(x) * x_dot,
(min_val, np.nextafter(np.log1p(max_val), np.float32(0))),
)(x)
def safe_arccos(x):
"""jnp.arccos(x) where x is clipped to [-1, 1]."""
y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1)))
return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y))
def apply_fn_to_grad(grad_fn):
"""Applies a scalar `grad_fn` function to the gradient of the input."""
@jax.custom_vjp
def fn_out(x):
return x
fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),))
return fn_out
def select(cond_pairs, default):
"""A helpful wrapper around jnp.select() that is easier to read."""
return jnp.select(*zip(*cond_pairs), default)
def power_ladder_max_output(p):
"""The limit of power_ladder(x, p) as x goes to infinity."""
return select(
[
(p == -jnp.inf, 1),
(p >= 0, jnp.inf),
],
safe_div(p - 1, p),
)
def power_ladder(x, p, premult=None, postmult=None):
"""Tukey's power ladder, with a +1 on x, some scaling, and special cases."""
# Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1)
if premult is not None:
x = x * premult
xp = jnp.abs(x)
xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1))
p_safe = clip_finite_nograd(remove_zero(p))
y = safe_sign(x) * select(
[
(p == 1, xp),
(p == 0, safe_log1p(xp)),
(p == -jnp.inf, -safe_expm1(-xp)),
(p == jnp.inf, safe_expm1(xp)),
],
clip_finite_nograd(
jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1)
),
)
if postmult is not None:
y = y * postmult
return y
def inv_power_ladder(y, p, premult=None, postmult=None):
"""The inverse of `power_ladder()`."""
if postmult is not None:
y /= postmult
yp = jnp.abs(y)
p_safe = clip_finite_nograd(remove_zero(p))
y_max = minus_eps(power_ladder_max_output(p))
yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad.
x = safe_sign(y) * select(
[
(p == 1, yp),
(p == 0, safe_expm1(yp)),
(p == -jnp.inf, -safe_log1p(-yp)),
(p == jnp.inf, safe_log1p(yp)),
],
jnp.abs(p_safe - 1)
* (
((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1
),
)
if premult is not None:
x /= premult
return x
def log_lerp(t, v0, v1):
"""Interpolate log-linearly from `v0` (t=0) to `v1` (t=1)."""
if v0 <= 0 or v1 <= 0:
raise ValueError(f'Interpolants {v0} and {v1} must be positive.')
lv0 = jnp.log(v0)
lv1 = jnp.log(v1)
return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0)
def approx_erf(x):
"""An approximation of erf() that is accurate to within 0.007."""
return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2))
def create_learning_rate_decay(**kwargs):
"""A partial evaluation of learning rate decay that can be used with gin."""
return functools.partial(learning_rate_decay, **kwargs)
def learning_rate_decay(
step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1
):
"""Continuous learning rate decay function.
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
Args:
step: int, the current optimization step.
lr_init: float, the initial learning rate.
lr_final: float, the final learning rate.
max_steps: int, the number of steps during optimization.
lr_delay_steps: int, the number of steps to delay the full learning rate.
lr_delay_mult: float, the multiplier on the rate when delaying it.
Returns:
lr: the learning for current step 'step'.
"""
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin(
0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
return delay_rate * log_lerp(step / max_steps, lr_init, lr_final)
def sorted_lookup(x, xp, fps, device_is_tpu):
"""Lookup `x` into locations `xp` , return indices and each `[fp]` value."""
if not isinstance(fps, tuple):
raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.')
if device_is_tpu:
# Identify the location in `xp` that corresponds to each `x`.
# The final `True` index in `mask` is the start of the matching interval.
mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None]
def find_interval(x):
# Grab the value where `mask` switches from True to False, and vice versa.
# This approach takes advantage of the fact that `x` is sorted.
x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2)
x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2)
return x0, x1
idx0, idx1 = find_interval(jnp.arange(xp.shape[-1]))
vals = [find_interval(fp) for fp in fps]
else:
# jnp.searchsorted() has slightly different conventions for boundary
# handling than the rest of this codebase.
idx = jnp.vectorize(
lambda a, v: jnp.searchsorted(a, v, side='right'),
signature='(n),(m)->(m)',
)(xp, x)
idx1 = jnp.minimum(idx, xp.shape[-1] - 1)
idx0 = jnp.maximum(idx - 1, 0)
vals = []
for fp in fps:
fp0 = jnp.take_along_axis(fp, idx0, axis=-1)
fp1 = jnp.take_along_axis(fp, idx1, axis=-1)
vals.append((fp0, fp1))
return (idx0, idx1), vals
def sorted_interp(
x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2
):
"""A version of interp() where xp and fp must be sorted."""
(xp0, xp1), (fp0, fp1) = sorted_lookup(
x, xp, (xp, fp), device_is_tpu=device_is_tpu
)[1]
offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1)
ret = fp0 + offset * (fp1 - fp0)
return ret
def searchsorted(a, v, device_is_tpu):
"""Behaves like jnp.searchsorted, excluding boundary conditions."""
return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0]
def override_gradient(fval, bval):
"""Use `fval` in the forward pass but `bval` in the backward pass."""
# Note that the parentheses are needed to avoid catastrophic cancellation.
return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval))
def average_across_multisamples(x):
"""Function that averages grid query results across the multisample dimension."""
return jnp.mean(x, axis=-2)
def noop(x):
return x
@jax.custom_jvp
def fake_clip(a, a_min, a_max):
"""jnp.clip() but the gradient doesn't get clipped on the backward pass."""
return jnp.clip(a, a_min, a_max)
@fake_clip.defjvp
def fake_clip_jvp(primals, tangents):
"""Override fake_clip()'s gradient so that it's a no-op."""
return jnp.clip(*primals), tangents[0]
@jax.jit
def general_lossfun(x, alpha, scale):
r"""This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x: The residual for which the loss is being computed. x can have any shape,
and alpha and scale will be broadcasted to match x's shape if necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
alpha=0: Cauchy/Lortentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
maxval = 1e15
# A "safe" versions of expm1 that will not NaN-out on large inputs.
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43))
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# Large values of |x| can cause non-finite gradients.
x = fake_clip(x, -maxval, maxval)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * (x / scale)**2
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(a - 2))
# The loss when not in one of the special casess.
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, jnp.log1p(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
<fim_middle>@safe_fn.defjvp | @safe_fn.defjvp | ANNOTATION | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_persistence_manager.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the persistence layer.
"""
raise NotImplementedError
# microagents/integrations/sqlite_agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]
# microagents/agents/agent_lifecycle.py
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
"""
from agents.agent_serializer import AgentSerializer
from integrations.memoize import memoize_to_sqlite
from integrations.sqlite_agent_persistence import SQLiteAgentPersistence
class AgentPersistenceManager:
def __init__(self, db_filename="agents.db"):
self.persistence = SQLiteAgentPersistence(db_filename)
def remove_agent(self, agent):
"""
Remove an agent from the database.
"""
self.persistence.remove_agent(agent.id)
def save_agent(self, agent):
"""
Serialize and save the agent state if it is a working agent and not a prime agent.
"""
if agent.is_working_agent() and not agent.is_prime_agent():
serialized_agent = AgentSerializer.serialize(agent)
self.persistence.save_agent(serialized_agent)
def load_agent(self, purpose, agent_lifecycle, openai_wrapper):
"""
Load an agent with the given purpose from the database.
"""
serialized_agent = self.persistence.fetch_agent(purpose)
if serialized_agent:
return AgentSerializer.from_dict(serialized_agent, agent_lifecycle, openai_wrapper)
return None
def load_all_agents(self, agent_lifecycle, openai_wrapper):
<fim_suffix>
purposes = self.persistence.load_all_purposes()
agents = []
for purpose in purposes:
agent = self.load_agent(purpose, agent_lifecycle, openai_wrapper)
if agent:
agents.append(agent)
return agents
<fim_middle>"""
Load all agents from the database.
""" | """
Load all agents from the database.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/sqlite_agent_persistence.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/memoize.py
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()
# microagents/integrations/agent_persistence.py
def save_agent(self, agent_dict):
"""
Save the serialized agent to the persistence layer.
"""
raise NotImplementedError
# microagents/integrations/memoize.py
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
"""
import sqlite3
import json
from integrations.agent_persistence import AbstractAgentPersistence
class SQLiteAgentPersistence(AbstractAgentPersistence):
def __init__(self, filename="agents.db"):
self.filename = filename
self._initialize_database()
def _initialize_database(self):
"""
Initialize the SQLite database with the required schema.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS agents (
id TEXT PRIMARY KEY,
purpose TEXT,
data TEXT
)
""")
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
def save_agent(self, agent_dict):
<fim_suffix>
with sqlite3.connect(self.filename) as conn:
conn.execute(
# add id field
"REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
(agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
)
def fetch_agent(self, purpose):
"""
Fetch a serialized agent based on its purpose from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT data FROM agents WHERE purpose = ?", (purpose,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]<fim_middle>"""
Save the serialized agent to an SQLite database.
""" | """
Save the serialized agent to an SQLite database.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/microagent_manager.py
def cleanup_agents(self):
"""Remove all agents with status stopped = True"""
self.agent_lifecycle.cleanup_agents()
# microagents/agents/agent_persistence_manager.py
def remove_agent(self, agent):
"""
Remove an agent from the database.
"""
self.persistence.remove_agent(agent.id)
# microagents/gradio_ui/agent_manager.py
def get_agents_info(self) -> List[dict]:
"""
Retrieve information about all agents for display in Gradio.
"""
agents = self.manager.get_agents()
agents_sorted = self.sort_agents(agents)
if not agents_sorted:
return []
return [self.format_agent_info(agent, agents_sorted) for agent in agents_sorted]
"""
import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
<fim_suffix>
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
for agent in available_agents:
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle>"""Remove all agents with status stopped = True in an efficient manner.""" | """Remove all agents with status stopped = True in an efficient manner.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_response.py
def _generate_chat_response(self, system_prompt, react_prompt):
return self.openai_wrapper.chat_completion(
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": react_prompt}
]
)
# microagents/agents/agent_evaluation.py
def evaluate(self, input_text: str, prompt: str, output: str) -> str:
"""
Returns evaluation agents response (score from 1-5)
"""
try:
formatted_prompt = AGENT_EVALUATION_PROMPT.format(input=input_text, prompt=prompt, output=output)
response = self.openai_api.chat_completion(messages=[{"role": "system", "content": formatted_prompt}])
if "5" in response or "4" in response:
return True
else:
return False
except Exception as error:
logger.exception(f"Agent evaluation error: {error}")
raise
# microagents/agents/response_extraction.py
def extract_response_from_prompt(self, prompt: str, question: str) -> str:
"""
Extracts a response based on the given prompt and question using the OpenAI GPT model.
Args:
prompt (str): The initial prompt for the model.
question (str): The user's question to be appended to the prompt.
Returns:
str: The extracted response.
Raises:
ValueError: If any of the arguments are not of expected type or empty.
"""
if not prompt or not isinstance(prompt, str):
raise ValueError("prompt must be a non-empty string")
if not question or not isinstance(question, str):
raise ValueError("question must be a non-empty string")
formatted_prompt = EXTRACTION_PROMPT_TEMPLATE.format(question=question, prompt=prompt)
messages = [
{"role": "system", "content": STANDARD_SYSTEM_PROMPT},
{"role": "user", "content": formatted_prompt}
]
return self.openai_wrapper.chat_completion(
messages=messages,
max_tokens=100,
)
"""
import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
for agent in available_agents:
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
<fim_suffix>
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle>"""
Generates a prompt for the LLM based on the given goal and sample input.
""" | """
Generates a prompt for the LLM based on the given goal and sample input.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/memoize.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/sqlite_agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]
# microagents/integrations/sqlite_agent_persistence.py
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
# microagents/integrations/sqlite_agent_persistence.py
def save_agent(self, agent_dict):
"""
Save the serialized agent to an SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute(
# add id field
"REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
(agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
)
"""
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
<fim_suffix>
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle>"""
Memoization decorator that caches the output of a method in a SQLite
database.
""" | """
Memoization decorator that caches the output of a method in a SQLite
database.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/microagent_manager.py
def create_agents(self) -> None:
"""Creates prime agents and logs the process."""
logger.info("Creating agents...")
try:
self.agent_lifecycle.create_prime_agent()
logger.info("Agents created successfully.")
except Exception as e:
logger.exception(f"Error in creating agents: {e}")
raise
# microagents/prompt_management/prompt_evolution.py
def _truncate_conversation(self, conversation: str) -> str:
"""Truncates the conversation to the last 1000 characters if it's too long."""
if len(conversation) > 1000:
return conversation[:200] + "..." + conversation[-1000:]
return conversation
# microagents/agents/agent_persistence_manager.py
def save_agent(self, agent):
"""
Serialize and save the agent state if it is a working agent and not a prime agent.
"""
if agent.is_working_agent() and not agent.is_prime_agent():
serialized_agent = AgentSerializer.serialize(agent)
self.persistence.save_agent(serialized_agent)
"""
import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
<fim_suffix>
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
for agent in available_agents:
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle>"""Creates the prime agent and adds it to the agent list.""" | """Creates the prime agent and adds it to the agent list.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/microagent_manager.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_lifecycle.py
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
for agent in available_agents:
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
# microagents/agents/agent_lifecycle.py
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
# microagents/gradio_ui/agent_manager.py
def update_agent_status(self, purpose: str, new_status: str):
"""
Update the status of a specific agent.
"""
agent = next((a for a in self.manager.get_agents() if a.purpose == purpose), None)
if agent:
agent.update_status(new_status)
self.manager.agent_lifecycle.save_agent(agent)
"""
import logging
from typing import List, Optional, Any
from agents.agent_lifecycle import AgentLifecycle
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from integrations.openaiwrapper import OpenAIAPIWrapper
logger= logging.getLogger()
class MicroAgentManager:
"""
Manages the creation and retrieval of micro agents.
"""
def __init__(self, openai_wrapper: OpenAIAPIWrapper, max_agents: int = 20, db_filename : str = "agents.db"):
self.max_agents = max_agents
self.openai_wrapper = openai_wrapper
self.agent_persistence = AgentPersistenceManager(db_filename)
self.agent_lifecycle = AgentLifecycle(self.openai_wrapper, self.agent_persistence, max_agents)
self.load_agents()
def stop_all_agents(self) -> None:
"""Stops all agents."""
self.agent_lifecycle.stop_all_agents()
def cleanup_agents(self):
"""Remove all agents with status stopped = True"""
self.agent_lifecycle.cleanup_agents()
def load_agents(self):
"""Loads agents from the database."""
loaded_agents = self.agent_persistence.load_all_agents(self.agent_lifecycle, self.openai_wrapper)
self.agent_lifecycle.agents.extend(loaded_agents)
logger.info(f"Loaded {len(loaded_agents)} agents from the database.")
def get_agents(self) -> List[Any]:
<fim_suffix>
self.cleanup_agents()
return self.agent_lifecycle.agents
def create_agents(self) -> None:
"""Creates prime agents and logs the process."""
logger.info("Creating agents...")
try:
self.agent_lifecycle.create_prime_agent()
logger.info("Agents created successfully.")
except Exception as e:
logger.exception(f"Error in creating agents: {e}")
raise
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
def display_agent_status(self):
"""Displays the current status of all agents."""
for agent in self.get_agents():
logger.info(f"Agent {agent.purpose}: Status = {agent.current_status}, Evolve Count = {agent.evolve_count}")
def display_active_agent_tree(self):
"""Displays a tree view of active agent relationships."""
for agent in self.get_agents():
if agent.active_agents:
logger.info(f"Agent {agent.purpose} is calling: {agent.active_agents}")
else:
logger.info(f"Agent {agent.purpose} is currently idle.")<fim_middle>"""Returns the list of agents.""" | """Returns the list of agents.""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_lifecycle.py
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
<fim_suffix>
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle>"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
""" | """
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/microagent_manager.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_lifecycle.py
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
# microagents/agents/microagent.py
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
# microagents/integrations/sqlite_agent_persistence.py
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
"""
import logging
from typing import List, Optional, Any
from agents.agent_lifecycle import AgentLifecycle
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from integrations.openaiwrapper import OpenAIAPIWrapper
logger= logging.getLogger()
class MicroAgentManager:
"""
Manages the creation and retrieval of micro agents.
"""
def __init__(self, openai_wrapper: OpenAIAPIWrapper, max_agents: int = 20, db_filename : str = "agents.db"):
self.max_agents = max_agents
self.openai_wrapper = openai_wrapper
self.agent_persistence = AgentPersistenceManager(db_filename)
self.agent_lifecycle = AgentLifecycle(self.openai_wrapper, self.agent_persistence, max_agents)
self.load_agents()
def stop_all_agents(self) -> None:
"""Stops all agents."""
self.agent_lifecycle.stop_all_agents()
def cleanup_agents(self):
<fim_suffix>
self.agent_lifecycle.cleanup_agents()
def load_agents(self):
"""Loads agents from the database."""
loaded_agents = self.agent_persistence.load_all_agents(self.agent_lifecycle, self.openai_wrapper)
self.agent_lifecycle.agents.extend(loaded_agents)
logger.info(f"Loaded {len(loaded_agents)} agents from the database.")
def get_agents(self) -> List[Any]:
"""Returns the list of agents."""
self.cleanup_agents()
return self.agent_lifecycle.agents
def create_agents(self) -> None:
"""Creates prime agents and logs the process."""
logger.info("Creating agents...")
try:
self.agent_lifecycle.create_prime_agent()
logger.info("Agents created successfully.")
except Exception as e:
logger.exception(f"Error in creating agents: {e}")
raise
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
def display_agent_status(self):
"""Displays the current status of all agents."""
for agent in self.get_agents():
logger.info(f"Agent {agent.purpose}: Status = {agent.current_status}, Evolve Count = {agent.evolve_count}")
def display_active_agent_tree(self):
"""Displays a tree view of active agent relationships."""
for agent in self.get_agents():
if agent.active_agents:
logger.info(f"Agent {agent.purpose} is calling: {agent.active_agents}")
else:
logger.info(f"Agent {agent.purpose} is currently idle.")<fim_middle>"""Remove all agents with status stopped = True""" | """Remove all agents with status stopped = True""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/logic.py
def display_agent_info(self, table_data):
time.sleep(2)
self.app.table.clear()
if len(table_data) > 0:
for row in table_data:
styled_row = [Text(str(cell), no_wrap=False, overflow="fold") for cell in row]
self.app.table.add_row(*styled_row)
# microagents/agents/microagent_manager.py
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
# microagents/integrations/openaiwrapper.py
def get_embedding(self, text):
"""
Retrieves the embedding for the given text.
:param text: The text for which embedding is required.
:return: The embedding for the given text.
"""
start_time = time.time()
retries = 0
while time.time() - start_time < self.timeout:
try:
response = self._openai_client.embeddings.create(input=text, model=ENGINE)
data = {
"data": [],
"model": response.model,
"usage" : {
"prompt_tokens": response.usage.prompt_tokens,
"total_tokens": response.usage.total_tokens
}
}
for emb in response.data:
data["data"].append({
"embedding": emb.embedding,
"index": emb.index
})
return data
except openai.OpenAIError as e:
logging.error(f"OpenAI API error: {e}")
retries += 1
if retries >= self.max_retries:
raise
time.sleep(RETRY_SLEEP_DURATION)
if f"{e}".startswith("Rate limit"):
print("Rate limit reached... sleeping for 20 seconds")
start_time+=20
time.sleep(20)
raise TimeoutError("API call timed out")
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
<fim_suffix>
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle>"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
""" | """
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
""" | BLOCK_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/microagent.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/format.py
def print_agent_statistics(agent):
"""
Print statistics for a given agent.
"""
print(f"π Stats for {agent.purpose}:")
stats = [
f"π Evolve Count: {agent.evolve_count}",
f"π» Code Executions: {agent.number_of_code_executions}",
f"π₯ Active Agents: {agent.active_agents}",
f"π Usage Count: {agent.usage_count}",
f"ποΈ Max Depth: {agent.max_depth}",
f"π Depth: {agent.depth}",
f"π οΈ Working Agent: {agent.working_agent}",
f"π Last Input: {agent.last_input}",
f"π¦ Status: {agent.current_status}",
f"{Fore.MAGENTA}\nPrompt for {agent.purpose}:{Style.RESET_ALL}",
f"{Fore.LIGHTMAGENTA_EX}{agent.dynamic_prompt}\n{Style.RESET_ALL}"
]
print('\n'.join(stats))
# microagents/ui/logic.py
def output_results(self):
self.app.rlog.write("\n\nFinal Results:\n")
for agent in self.manager.get_agents():
self.app.rlog.write(f"π Stats for {agent.purpose} :")
self.app.rlog.write(f"π Evolve Count: {agent.evolve_count}")
self.app.rlog.write(f"π» Code Executions: {agent.number_of_code_executions}")
self.app.rlog.write(f"π₯ Active Agents: {agent.active_agents}")
self.app.rlog.write(f"π Usage Count: {agent.usage_count}")
self.app.rlog.write(f"π Max Depth: {agent.max_depth}")
self.app.rlog.write(f"π Depth: {agent.depth}")
self.app.rlog.write(f"π Working Agent::{agent.working_agent}")
self.app.rlog.write(f"π Last Input: {agent.last_input}")
self.app.rlog.write(f"π¦ Status: {agent.current_status}")
self.app.rlog.write(f"\nPrompt for {agent.purpose}:")
self.app.rlog.write(f"{agent.dynamic_prompt}\n")
# microagents/gradio_ui/agent_manager.py
def format_agent_info_details(self, agent: MicroAgent) -> dict:
"""
Format the information of a MicroAgent for display.
"""
return {
"Purpose": agent.purpose,
"System Prompt": agent.dynamic_prompt,
"Last Input": agent.last_input,
"Last Output": agent.last_output,
"Last Conversation": agent.last_conversation,
}
"""
import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
<fim_suffix>
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "β Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "β Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle>self.parent_id = parent_id | self.parent_id = parent_id | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/microagent.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/format.py
def print_agent_statistics(agent):
"""
Print statistics for a given agent.
"""
print(f"π Stats for {agent.purpose}:")
stats = [
f"π Evolve Count: {agent.evolve_count}",
f"π» Code Executions: {agent.number_of_code_executions}",
f"π₯ Active Agents: {agent.active_agents}",
f"π Usage Count: {agent.usage_count}",
f"ποΈ Max Depth: {agent.max_depth}",
f"π Depth: {agent.depth}",
f"π οΈ Working Agent: {agent.working_agent}",
f"π Last Input: {agent.last_input}",
f"π¦ Status: {agent.current_status}",
f"{Fore.MAGENTA}\nPrompt for {agent.purpose}:{Style.RESET_ALL}",
f"{Fore.LIGHTMAGENTA_EX}{agent.dynamic_prompt}\n{Style.RESET_ALL}"
]
print('\n'.join(stats))
# microagents/ui/logic.py
def output_results(self):
self.app.rlog.write("\n\nFinal Results:\n")
for agent in self.manager.get_agents():
self.app.rlog.write(f"π Stats for {agent.purpose} :")
self.app.rlog.write(f"π Evolve Count: {agent.evolve_count}")
self.app.rlog.write(f"π» Code Executions: {agent.number_of_code_executions}")
self.app.rlog.write(f"π₯ Active Agents: {agent.active_agents}")
self.app.rlog.write(f"π Usage Count: {agent.usage_count}")
self.app.rlog.write(f"π Max Depth: {agent.max_depth}")
self.app.rlog.write(f"π Depth: {agent.depth}")
self.app.rlog.write(f"π Working Agent::{agent.working_agent}")
self.app.rlog.write(f"π Last Input: {agent.last_input}")
self.app.rlog.write(f"π¦ Status: {agent.current_status}")
self.app.rlog.write(f"\nPrompt for {agent.purpose}:")
self.app.rlog.write(f"{agent.dynamic_prompt}\n")
# microagents/gradio_ui/agent_manager.py
def format_agent_info_details(self, agent: MicroAgent) -> dict:
"""
Format the information of a MicroAgent for display.
"""
return {
"Purpose": agent.purpose,
"System Prompt": agent.dynamic_prompt,
"Last Input": agent.last_input,
"Last Output": agent.last_output,
"Last Conversation": agent.last_conversation,
}
"""
import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
<fim_suffix>
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "β Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "β Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle>self.id = str(uuid.uuid4()) | self.id = str(uuid.uuid4()) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/memoize.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/parallel_agent_executor.py
def determine_winning_agent(self):
if not self.response_queue.empty():
return self.response_queue.get()
return None, None
# microagents/agents/microagent.py
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
# microagents/gradio_ui/agent_manager.py
def get_agents_info(self) -> List[dict]:
"""
Retrieve information about all agents for display in Gradio.
"""
agents = self.manager.get_agents()
agents_sorted = self.sort_agents(agents)
if not agents_sorted:
return []
return [self.format_agent_info(agent, agents_sorted) for agent in agents_sorted]
"""
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
<fim_suffix>
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle>result = self._fetch_from_cache(arg_hash) | result = self._fetch_from_cache(arg_hash) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/microagent.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/format.py
def print_agent_statistics(agent):
"""
Print statistics for a given agent.
"""
print(f"π Stats for {agent.purpose}:")
stats = [
f"π Evolve Count: {agent.evolve_count}",
f"π» Code Executions: {agent.number_of_code_executions}",
f"π₯ Active Agents: {agent.active_agents}",
f"π Usage Count: {agent.usage_count}",
f"ποΈ Max Depth: {agent.max_depth}",
f"π Depth: {agent.depth}",
f"π οΈ Working Agent: {agent.working_agent}",
f"π Last Input: {agent.last_input}",
f"π¦ Status: {agent.current_status}",
f"{Fore.MAGENTA}\nPrompt for {agent.purpose}:{Style.RESET_ALL}",
f"{Fore.LIGHTMAGENTA_EX}{agent.dynamic_prompt}\n{Style.RESET_ALL}"
]
print('\n'.join(stats))
# microagents/ui/logic.py
def output_results(self):
self.app.rlog.write("\n\nFinal Results:\n")
for agent in self.manager.get_agents():
self.app.rlog.write(f"π Stats for {agent.purpose} :")
self.app.rlog.write(f"π Evolve Count: {agent.evolve_count}")
self.app.rlog.write(f"π» Code Executions: {agent.number_of_code_executions}")
self.app.rlog.write(f"π₯ Active Agents: {agent.active_agents}")
self.app.rlog.write(f"π Usage Count: {agent.usage_count}")
self.app.rlog.write(f"π Max Depth: {agent.max_depth}")
self.app.rlog.write(f"π Depth: {agent.depth}")
self.app.rlog.write(f"π Working Agent::{agent.working_agent}")
self.app.rlog.write(f"π Last Input: {agent.last_input}")
self.app.rlog.write(f"π¦ Status: {agent.current_status}")
self.app.rlog.write(f"\nPrompt for {agent.purpose}:")
self.app.rlog.write(f"{agent.dynamic_prompt}\n")
# microagents/gradio_ui/agent_manager.py
def format_agent_info_details(self, agent: MicroAgent) -> dict:
"""
Format the information of a MicroAgent for display.
"""
return {
"Purpose": agent.purpose,
"System Prompt": agent.dynamic_prompt,
"Last Input": agent.last_input,
"Last Output": agent.last_output,
"Last Conversation": agent.last_conversation,
}
"""
import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
<fim_suffix>
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "β Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "β Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle>self.dynamic_prompt = initial_prompt | self.dynamic_prompt = initial_prompt | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/sqlite_agent_persistence.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the persistence layer.
"""
raise NotImplementedError
# microagents/agents/agent_persistence_manager.py
def load_all_agents(self, agent_lifecycle, openai_wrapper):
"""
Load all agents from the database.
"""
purposes = self.persistence.load_all_purposes()
agents = []
for purpose in purposes:
agent = self.load_agent(purpose, agent_lifecycle, openai_wrapper)
if agent:
agents.append(agent)
return agents
# microagents/integrations/memoize.py
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
"""
import sqlite3
import json
from integrations.agent_persistence import AbstractAgentPersistence
class SQLiteAgentPersistence(AbstractAgentPersistence):
def __init__(self, filename="agents.db"):
self.filename = filename
self._initialize_database()
def _initialize_database(self):
"""
Initialize the SQLite database with the required schema.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS agents (
id TEXT PRIMARY KEY,
purpose TEXT,
data TEXT
)
""")
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
def save_agent(self, agent_dict):
"""
Save the serialized agent to an SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute(
# add id field
"REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
(agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
)
def fetch_agent(self, purpose):
"""
Fetch a serialized agent based on its purpose from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT data FROM agents WHERE purpose = ?", (purpose,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
<fim_suffix>
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]<fim_middle>cursor = conn.cursor() | cursor = conn.cursor() | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_lifecycle.py
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
<fim_suffix>
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle>similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0] | similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0] | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/memoize.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/sqlite_agent_persistence.py
def save_agent(self, agent_dict):
"""
Save the serialized agent to an SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute(
# add id field
"REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
(agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
)
# microagents/agents/microagent.py
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
# microagents/integrations/sqlite_agent_persistence.py
def fetch_agent(self, purpose):
"""
Fetch a serialized agent based on its purpose from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT data FROM agents WHERE purpose = ?", (purpose,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
"""
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
<fim_suffix>
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle>cursor = self.connection.cursor() | cursor = self.connection.cursor() | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/microagent.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/format.py
def print_agent_statistics(agent):
"""
Print statistics for a given agent.
"""
print(f"π Stats for {agent.purpose}:")
stats = [
f"π Evolve Count: {agent.evolve_count}",
f"π» Code Executions: {agent.number_of_code_executions}",
f"π₯ Active Agents: {agent.active_agents}",
f"π Usage Count: {agent.usage_count}",
f"ποΈ Max Depth: {agent.max_depth}",
f"π Depth: {agent.depth}",
f"π οΈ Working Agent: {agent.working_agent}",
f"π Last Input: {agent.last_input}",
f"π¦ Status: {agent.current_status}",
f"{Fore.MAGENTA}\nPrompt for {agent.purpose}:{Style.RESET_ALL}",
f"{Fore.LIGHTMAGENTA_EX}{agent.dynamic_prompt}\n{Style.RESET_ALL}"
]
print('\n'.join(stats))
# microagents/ui/logic.py
def output_results(self):
self.app.rlog.write("\n\nFinal Results:\n")
for agent in self.manager.get_agents():
self.app.rlog.write(f"π Stats for {agent.purpose} :")
self.app.rlog.write(f"π Evolve Count: {agent.evolve_count}")
self.app.rlog.write(f"π» Code Executions: {agent.number_of_code_executions}")
self.app.rlog.write(f"π₯ Active Agents: {agent.active_agents}")
self.app.rlog.write(f"π Usage Count: {agent.usage_count}")
self.app.rlog.write(f"π Max Depth: {agent.max_depth}")
self.app.rlog.write(f"π Depth: {agent.depth}")
self.app.rlog.write(f"π Working Agent::{agent.working_agent}")
self.app.rlog.write(f"π Last Input: {agent.last_input}")
self.app.rlog.write(f"π¦ Status: {agent.current_status}")
self.app.rlog.write(f"\nPrompt for {agent.purpose}:")
self.app.rlog.write(f"{agent.dynamic_prompt}\n")
# microagents/gradio_ui/agent_manager.py
def format_agent_info_details(self, agent: MicroAgent) -> dict:
"""
Format the information of a MicroAgent for display.
"""
return {
"Purpose": agent.purpose,
"System Prompt": agent.dynamic_prompt,
"Last Input": agent.last_input,
"Last Output": agent.last_output,
"Last Conversation": agent.last_conversation,
}
"""
import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
<fim_suffix>
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "β Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "β Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle>self.parent_id = None | self.parent_id = None | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/memoize.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/sqlite_agent_persistence.py
def _initialize_database(self):
"""
Initialize the SQLite database with the required schema.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS agents (
id TEXT PRIMARY KEY,
purpose TEXT,
data TEXT
)
""")
# microagents/integrations/sqlite_agent_persistence.py
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
# microagents/integrations/sqlite_agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]
"""
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
<fim_suffix>
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle>self.connection = sqlite3.connect(self.filename) | self.connection = sqlite3.connect(self.filename) | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/microagent.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/format.py
def print_agent_statistics(agent):
"""
Print statistics for a given agent.
"""
print(f"π Stats for {agent.purpose}:")
stats = [
f"π Evolve Count: {agent.evolve_count}",
f"π» Code Executions: {agent.number_of_code_executions}",
f"π₯ Active Agents: {agent.active_agents}",
f"π Usage Count: {agent.usage_count}",
f"ποΈ Max Depth: {agent.max_depth}",
f"π Depth: {agent.depth}",
f"π οΈ Working Agent: {agent.working_agent}",
f"π Last Input: {agent.last_input}",
f"π¦ Status: {agent.current_status}",
f"{Fore.MAGENTA}\nPrompt for {agent.purpose}:{Style.RESET_ALL}",
f"{Fore.LIGHTMAGENTA_EX}{agent.dynamic_prompt}\n{Style.RESET_ALL}"
]
print('\n'.join(stats))
# microagents/ui/logic.py
def output_results(self):
self.app.rlog.write("\n\nFinal Results:\n")
for agent in self.manager.get_agents():
self.app.rlog.write(f"π Stats for {agent.purpose} :")
self.app.rlog.write(f"π Evolve Count: {agent.evolve_count}")
self.app.rlog.write(f"π» Code Executions: {agent.number_of_code_executions}")
self.app.rlog.write(f"π₯ Active Agents: {agent.active_agents}")
self.app.rlog.write(f"π Usage Count: {agent.usage_count}")
self.app.rlog.write(f"π Max Depth: {agent.max_depth}")
self.app.rlog.write(f"π Depth: {agent.depth}")
self.app.rlog.write(f"π Working Agent::{agent.working_agent}")
self.app.rlog.write(f"π Last Input: {agent.last_input}")
self.app.rlog.write(f"π¦ Status: {agent.current_status}")
self.app.rlog.write(f"\nPrompt for {agent.purpose}:")
self.app.rlog.write(f"{agent.dynamic_prompt}\n")
# microagents/gradio_ui/agent_manager.py
def format_agent_info_details(self, agent: MicroAgent) -> dict:
"""
Format the information of a MicroAgent for display.
"""
return {
"Purpose": agent.purpose,
"System Prompt": agent.dynamic_prompt,
"Last Input": agent.last_input,
"Last Output": agent.last_output,
"Last Conversation": agent.last_conversation,
}
"""
import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
<fim_suffix>
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "β Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "β Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle>self.depth = depth | self.depth = depth | STATEMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_response.py
def _generate_chat_response(self, system_prompt, react_prompt):
return self.openai_wrapper.chat_completion(
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": react_prompt}
]
)
# microagents/agents/agent_evaluation.py
def evaluate(self, input_text: str, prompt: str, output: str) -> str:
"""
Returns evaluation agents response (score from 1-5)
"""
try:
formatted_prompt = AGENT_EVALUATION_PROMPT.format(input=input_text, prompt=prompt, output=output)
response = self.openai_api.chat_completion(messages=[{"role": "system", "content": formatted_prompt}])
if "5" in response or "4" in response:
return True
else:
return False
except Exception as error:
logger.exception(f"Agent evaluation error: {error}")
raise
# microagents/agents/response_extraction.py
def extract_response_from_prompt(self, prompt: str, question: str) -> str:
"""
Extracts a response based on the given prompt and question using the OpenAI GPT model.
Args:
prompt (str): The initial prompt for the model.
question (str): The user's question to be appended to the prompt.
Returns:
str: The extracted response.
Raises:
ValueError: If any of the arguments are not of expected type or empty.
"""
if not prompt or not isinstance(prompt, str):
raise ValueError("prompt must be a non-empty string")
if not question or not isinstance(question, str):
raise ValueError("question must be a non-empty string")
formatted_prompt = EXTRACTION_PROMPT_TEMPLATE.format(question=question, prompt=prompt)
messages = [
{"role": "system", "content": STANDARD_SYSTEM_PROMPT},
{"role": "user", "content": formatted_prompt}
]
return self.openai_wrapper.chat_completion(
messages=messages,
max_tokens=100,
)
"""
import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
for agent in available_agents:
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
<fim_suffix>
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle>try:
return self.openai_wrapper.chat_completion(messages=messages) | try:
return self.openai_wrapper.chat_completion(messages=messages) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_lifecycle.py
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
<fim_suffix>
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle>try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity | try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/logic.py
def display_agent_info(self, table_data):
time.sleep(2)
self.app.table.clear()
if len(table_data) > 0:
for row in table_data:
styled_row = [Text(str(cell), no_wrap=False, overflow="fold") for cell in row]
self.app.table.add_row(*styled_row)
# microagents/agents/microagent_manager.py
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
# microagents/integrations/openaiwrapper.py
def get_embedding(self, text):
"""
Retrieves the embedding for the given text.
:param text: The text for which embedding is required.
:return: The embedding for the given text.
"""
start_time = time.time()
retries = 0
while time.time() - start_time < self.timeout:
try:
response = self._openai_client.embeddings.create(input=text, model=ENGINE)
data = {
"data": [],
"model": response.model,
"usage" : {
"prompt_tokens": response.usage.prompt_tokens,
"total_tokens": response.usage.total_tokens
}
}
for emb in response.data:
data["data"].append({
"embedding": emb.embedding,
"index": emb.index
})
return data
except openai.OpenAIError as e:
logging.error(f"OpenAI API error: {e}")
retries += 1
if retries >= self.max_retries:
raise
time.sleep(RETRY_SLEEP_DURATION)
if f"{e}".startswith("Rate limit"):
print("Rate limit reached... sleeping for 20 seconds")
start_time+=20
time.sleep(20)
raise TimeoutError("API call timed out")
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
<fim_suffix>
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle>try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format") | try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format") | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/microagent_manager.py
def create_agents(self) -> None:
"""Creates prime agents and logs the process."""
logger.info("Creating agents...")
try:
self.agent_lifecycle.create_prime_agent()
logger.info("Agents created successfully.")
except Exception as e:
logger.exception(f"Error in creating agents: {e}")
raise
# microagents/agents/microagent_manager.py
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
# microagents/gradio_ui/agent_manager.py
def process_user_input(self, user_input: str) -> str:
"""
Process user input through a specified agent and return its response.
"""
try:
parallel_executor = ParallelAgentExecutor(self.manager)
delegated_response = parallel_executor.create_and_run_agents("Bootstrap Agent", 1, user_input)
return delegated_response
except Exception as e:
logger.exception(f"Error processing user input: {e}")
return "Error in processing input."
"""
import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
for agent in available_agents:
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
<fim_suffix>
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle>try:
self.agent_persistence.save_agent(agent) | try:
self.agent_persistence.save_agent(agent) | TRY | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_lifecycle.py
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
<fim_suffix>
<fim_middle>except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}") | except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}") | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_response.py
def _generate_chat_response(self, system_prompt, react_prompt):
return self.openai_wrapper.chat_completion(
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": react_prompt}
]
)
# microagents/agents/agent_evaluation.py
def evaluate(self, input_text: str, prompt: str, output: str) -> str:
"""
Returns evaluation agents response (score from 1-5)
"""
try:
formatted_prompt = AGENT_EVALUATION_PROMPT.format(input=input_text, prompt=prompt, output=output)
response = self.openai_api.chat_completion(messages=[{"role": "system", "content": formatted_prompt}])
if "5" in response or "4" in response:
return True
else:
return False
except Exception as error:
logger.exception(f"Agent evaluation error: {error}")
raise
# microagents/agents/response_extraction.py
def extract_response_from_prompt(self, prompt: str, question: str) -> str:
"""
Extracts a response based on the given prompt and question using the OpenAI GPT model.
Args:
prompt (str): The initial prompt for the model.
question (str): The user's question to be appended to the prompt.
Returns:
str: The extracted response.
Raises:
ValueError: If any of the arguments are not of expected type or empty.
"""
if not prompt or not isinstance(prompt, str):
raise ValueError("prompt must be a non-empty string")
if not question or not isinstance(question, str):
raise ValueError("question must be a non-empty string")
formatted_prompt = EXTRACTION_PROMPT_TEMPLATE.format(question=question, prompt=prompt)
messages = [
{"role": "system", "content": STANDARD_SYSTEM_PROMPT},
{"role": "user", "content": formatted_prompt}
]
return self.openai_wrapper.chat_completion(
messages=messages,
max_tokens=100,
)
"""
import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
for agent in available_agents:
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
<fim_suffix>
<fim_middle>except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return "" | except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return "" | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_lifecycle.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/microagent_manager.py
def create_agents(self) -> None:
"""Creates prime agents and logs the process."""
logger.info("Creating agents...")
try:
self.agent_lifecycle.create_prime_agent()
logger.info("Agents created successfully.")
except Exception as e:
logger.exception(f"Error in creating agents: {e}")
raise
# microagents/agents/microagent_manager.py
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
# microagents/gradio_ui/agent_manager.py
def process_user_input(self, user_input: str) -> str:
"""
Process user input through a specified agent and return its response.
"""
try:
parallel_executor = ParallelAgentExecutor(self.manager)
delegated_response = parallel_executor.create_and_run_agents("Bootstrap Agent", 1, user_input)
return delegated_response
except Exception as e:
logger.exception(f"Error processing user input: {e}")
return "Error in processing input."
"""
import logging
from typing import List
from agents.microagent import MicroAgent
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_similarity import AgentSimilarity
from agents.agent_persistence_manager import AgentPersistenceManager
from numpy import ndarray
from prompt_management.prompts import (
PRIME_PROMPT, PRIME_NAME,
PROMPT_ENGINEERING_SYSTEM_PROMPT,
PROMPT_ENGINEERING_TEMPLATE, EXAMPLES
)
logger = logging.getLogger()
DEFAULT_MAX_AGENTS = 2000
PRIME_AGENT_WEIGHT = 25
class AgentLifecycle:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agent_persistence_manager: AgentPersistenceManager, max_agents: int = DEFAULT_MAX_AGENTS):
self.agents: List[MicroAgent] = []
self.openai_wrapper = openai_wrapper
self.agent_persistence = agent_persistence_manager
self.max_agents = max_agents
def stop_all_agents(self) -> None:
"""Stops all agents."""
for agent in self.agents:
agent.stop()
def reset_all_agents(self) -> None:
"""Resets all agents."""
for agent in self.agents:
agent.reset()
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
def create_prime_agent(self) -> None:
"""Creates the prime agent and adds it to the agent list."""
prime_agent = MicroAgent(
PRIME_PROMPT, PRIME_NAME, 0, self,
self.openai_wrapper, PRIME_AGENT_WEIGHT, True, True
)
self.agents.append(prime_agent)
def add_agent(self, agent: MicroAgent) -> None:
"""Adds an agent to the list of agents."""
self.agents.append(agent)
def get_available_agents_for_agent(self, agent) -> List[MicroAgent]:
"""Returns the list of available agents for the given purpose."""
agent_id = agent.id
available_agents = [agent for agent in self.agents if agent.purpose != "Bootstrap Agent" and agent.working_agent]
for agent in available_agents:
if agent.parent_id != agent_id:
available_agents.remove(agent)
return available_agents
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, force_new: bool = False, parent_agent=None) -> MicroAgent:
"""
Retrieves or creates an agent based on the given purpose.
Optionally creates a new agent regardless of similarity if force_new is True.
"""
if not force_new:
agent_similarity = AgentSimilarity(self.openai_wrapper, self.agents)
purpose_embedding = agent_similarity.get_embedding(purpose)
closest_agent, highest_similarity = agent_similarity.find_closest_agent(purpose_embedding)
similarity_threshold = agent_similarity.calculate_similarity_threshold()
if highest_similarity >= similarity_threshold:
closest_agent.usage_count += 1
return closest_agent
return self._create_and_add_agent(purpose, depth, sample_input, parent_agent=parent_agent)
def _create_and_add_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> MicroAgent:
"""Helper method to create and add a new agent."""
if len(self.agents) >= self.max_agents:
self._remove_least_used_agent()
new_agent = MicroAgent(self._generate_llm_prompt(purpose, sample_input), purpose, depth, self, self.openai_wrapper, parent=parent_agent)
new_agent.usage_count = 1
self.agents.append(new_agent)
return new_agent
def _remove_least_used_agent(self):
"""Removes the least used agent."""
least_used_agent = min(self.agents, key=lambda agent: agent.usage_count)
self.agents.remove(least_used_agent)
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
<fim_suffix>
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
<fim_middle>except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise | except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/logic.py
def display_agent_info(self, table_data):
time.sleep(2)
self.app.table.clear()
if len(table_data) > 0:
for row in table_data:
styled_row = [Text(str(cell), no_wrap=False, overflow="fold") for cell in row]
self.app.table.add_row(*styled_row)
# microagents/agents/microagent_manager.py
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
# microagents/integrations/openaiwrapper.py
def get_embedding(self, text):
"""
Retrieves the embedding for the given text.
:param text: The text for which embedding is required.
:return: The embedding for the given text.
"""
start_time = time.time()
retries = 0
while time.time() - start_time < self.timeout:
try:
response = self._openai_client.embeddings.create(input=text, model=ENGINE)
data = {
"data": [],
"model": response.model,
"usage" : {
"prompt_tokens": response.usage.prompt_tokens,
"total_tokens": response.usage.total_tokens
}
}
for emb in response.data:
data["data"].append({
"embedding": emb.embedding,
"index": emb.index
})
return data
except openai.OpenAIError as e:
logging.error(f"OpenAI API error: {e}")
retries += 1
if retries >= self.max_retries:
raise
time.sleep(RETRY_SLEEP_DURATION)
if f"{e}".startswith("Rate limit"):
print("Rate limit reached... sleeping for 20 seconds")
start_time+=20
time.sleep(20)
raise TimeoutError("API call timed out")
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
<fim_suffix>
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle>except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}") | except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}") | CATCH | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_persistence_manager.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the persistence layer.
"""
raise NotImplementedError
# microagents/integrations/sqlite_agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]
# microagents/agents/agent_lifecycle.py
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
"""
from agents.agent_serializer import AgentSerializer
from integrations.memoize import memoize_to_sqlite
from integrations.sqlite_agent_persistence import SQLiteAgentPersistence
class AgentPersistenceManager:
def __init__(self, db_filename="agents.db"):
self.persistence = SQLiteAgentPersistence(db_filename)
def remove_agent(self, agent):
"""
Remove an agent from the database.
"""
self.persistence.remove_agent(agent.id)
def save_agent(self, agent):
"""
Serialize and save the agent state if it is a working agent and not a prime agent.
"""
if agent.is_working_agent() and not agent.is_prime_agent():
serialized_agent = AgentSerializer.serialize(agent)
self.persistence.save_agent(serialized_agent)
def load_agent(self, purpose, agent_lifecycle, openai_wrapper):
"""
Load an agent with the given purpose from the database.
"""
serialized_agent = self.persistence.fetch_agent(purpose)
if serialized_agent:
return AgentSerializer.from_dict(serialized_agent, agent_lifecycle, openai_wrapper)
return None
def load_all_agents(self, agent_lifecycle, openai_wrapper):
"""
Load all agents from the database.
"""
purposes = self.persistence.load_all_purposes()
agents = []
<fim_suffix>
return agents
<fim_middle>for purpose in purposes:
agent = self.load_agent(purpose, agent_lifecycle, openai_wrapper)
if agent:
agents.append(agent) | for purpose in purposes:
agent = self.load_agent(purpose, agent_lifecycle, openai_wrapper)
if agent:
agents.append(agent) | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_lifecycle.py
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
<fim_suffix>
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle>for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent | for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent | FOR | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/microagent.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/format.py
def print_agent_statistics(agent):
"""
Print statistics for a given agent.
"""
print(f"π Stats for {agent.purpose}:")
stats = [
f"π Evolve Count: {agent.evolve_count}",
f"π» Code Executions: {agent.number_of_code_executions}",
f"π₯ Active Agents: {agent.active_agents}",
f"π Usage Count: {agent.usage_count}",
f"ποΈ Max Depth: {agent.max_depth}",
f"π Depth: {agent.depth}",
f"π οΈ Working Agent: {agent.working_agent}",
f"π Last Input: {agent.last_input}",
f"π¦ Status: {agent.current_status}",
f"{Fore.MAGENTA}\nPrompt for {agent.purpose}:{Style.RESET_ALL}",
f"{Fore.LIGHTMAGENTA_EX}{agent.dynamic_prompt}\n{Style.RESET_ALL}"
]
print('\n'.join(stats))
# microagents/ui/logic.py
def output_results(self):
self.app.rlog.write("\n\nFinal Results:\n")
for agent in self.manager.get_agents():
self.app.rlog.write(f"π Stats for {agent.purpose} :")
self.app.rlog.write(f"π Evolve Count: {agent.evolve_count}")
self.app.rlog.write(f"π» Code Executions: {agent.number_of_code_executions}")
self.app.rlog.write(f"π₯ Active Agents: {agent.active_agents}")
self.app.rlog.write(f"π Usage Count: {agent.usage_count}")
self.app.rlog.write(f"π Max Depth: {agent.max_depth}")
self.app.rlog.write(f"π Depth: {agent.depth}")
self.app.rlog.write(f"π Working Agent::{agent.working_agent}")
self.app.rlog.write(f"π Last Input: {agent.last_input}")
self.app.rlog.write(f"π¦ Status: {agent.current_status}")
self.app.rlog.write(f"\nPrompt for {agent.purpose}:")
self.app.rlog.write(f"{agent.dynamic_prompt}\n")
# microagents/gradio_ui/agent_manager.py
def format_agent_info_details(self, agent: MicroAgent) -> dict:
"""
Format the information of a MicroAgent for display.
"""
return {
"Purpose": agent.purpose,
"System Prompt": agent.dynamic_prompt,
"Last Input": agent.last_input,
"Last Output": agent.last_output,
"Last Conversation": agent.last_conversation,
}
"""
import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
<fim_suffix>
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "β Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "β Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle>if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4()) | if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4()) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/microagent.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/format.py
def print_agent_statistics(agent):
"""
Print statistics for a given agent.
"""
print(f"π Stats for {agent.purpose}:")
stats = [
f"π Evolve Count: {agent.evolve_count}",
f"π» Code Executions: {agent.number_of_code_executions}",
f"π₯ Active Agents: {agent.active_agents}",
f"π Usage Count: {agent.usage_count}",
f"ποΈ Max Depth: {agent.max_depth}",
f"π Depth: {agent.depth}",
f"π οΈ Working Agent: {agent.working_agent}",
f"π Last Input: {agent.last_input}",
f"π¦ Status: {agent.current_status}",
f"{Fore.MAGENTA}\nPrompt for {agent.purpose}:{Style.RESET_ALL}",
f"{Fore.LIGHTMAGENTA_EX}{agent.dynamic_prompt}\n{Style.RESET_ALL}"
]
print('\n'.join(stats))
# microagents/ui/logic.py
def output_results(self):
self.app.rlog.write("\n\nFinal Results:\n")
for agent in self.manager.get_agents():
self.app.rlog.write(f"π Stats for {agent.purpose} :")
self.app.rlog.write(f"π Evolve Count: {agent.evolve_count}")
self.app.rlog.write(f"π» Code Executions: {agent.number_of_code_executions}")
self.app.rlog.write(f"π₯ Active Agents: {agent.active_agents}")
self.app.rlog.write(f"π Usage Count: {agent.usage_count}")
self.app.rlog.write(f"π Max Depth: {agent.max_depth}")
self.app.rlog.write(f"π Depth: {agent.depth}")
self.app.rlog.write(f"π Working Agent::{agent.working_agent}")
self.app.rlog.write(f"π Last Input: {agent.last_input}")
self.app.rlog.write(f"π¦ Status: {agent.current_status}")
self.app.rlog.write(f"\nPrompt for {agent.purpose}:")
self.app.rlog.write(f"{agent.dynamic_prompt}\n")
# microagents/gradio_ui/agent_manager.py
def format_agent_info_details(self, agent: MicroAgent) -> dict:
"""
Format the information of a MicroAgent for display.
"""
return {
"Purpose": agent.purpose,
"System Prompt": agent.dynamic_prompt,
"Last Input": agent.last_input,
"Last Output": agent.last_output,
"Last Conversation": agent.last_conversation,
}
"""
import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
<fim_suffix>
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "β Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "β Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle>if id:
self.id = id
else:
self.id = str(uuid.uuid4()) | if id:
self.id = id
else:
self.id = str(uuid.uuid4()) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_lifecycle.py
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
<fim_suffix>
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle>if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent | if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/agent_lifecycle.py
def save_agent(self, agent: MicroAgent) -> None:
"""Saves the given agent with error handling."""
try:
self.agent_persistence.save_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def remove_agent(self, agent: MicroAgent) -> None:
"""Removes the given agent with error handling."""
try:
self.agent_persistence.remove_agent(agent)
except Exception as e:
logger.exception(f"Error in saving agent: {e}")
raise
# microagents/agents/agent_lifecycle.py
def _generate_llm_prompt(self, goal: str, sample_input: str) -> str:
"""
Generates a prompt for the LLM based on the given goal and sample input.
"""
messages = [
{"role": "system", "content": PROMPT_ENGINEERING_SYSTEM_PROMPT},
{"role": "user", "content": PROMPT_ENGINEERING_TEMPLATE.format(goal=goal, sample_input=sample_input, examples=EXAMPLES)}
]
try:
return self.openai_wrapper.chat_completion(messages=messages)
except Exception as e:
logger.exception(f"Error generating LLM prompt: {e}")
return ""
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format")
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
<fim_suffix>
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle>if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose) | if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_similarity.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/logic.py
def display_agent_info(self, table_data):
time.sleep(2)
self.app.table.clear()
if len(table_data) > 0:
for row in table_data:
styled_row = [Text(str(cell), no_wrap=False, overflow="fold") for cell in row]
self.app.table.add_row(*styled_row)
# microagents/agents/microagent_manager.py
def get_or_create_agent(self, purpose: str, depth: int, sample_input: str, parent_agent=None) -> Any:
"""
Retrieves an existing agent or creates a new one based on the given purpose.
"""
logger.info(f"Getting or creating agent for purpose: {purpose}")
try:
agent = self.agent_lifecycle.get_or_create_agent(purpose, depth, sample_input, parent_agent=parent_agent)
logger.info(f"Agent for purpose '{purpose}' retrieved or created.")
return agent
except Exception as e:
logging.exception(f"Error in getting or creating agent: {e}")
raise
# microagents/integrations/openaiwrapper.py
def get_embedding(self, text):
"""
Retrieves the embedding for the given text.
:param text: The text for which embedding is required.
:return: The embedding for the given text.
"""
start_time = time.time()
retries = 0
while time.time() - start_time < self.timeout:
try:
response = self._openai_client.embeddings.create(input=text, model=ENGINE)
data = {
"data": [],
"model": response.model,
"usage" : {
"prompt_tokens": response.usage.prompt_tokens,
"total_tokens": response.usage.total_tokens
}
}
for emb in response.data:
data["data"].append({
"embedding": emb.embedding,
"index": emb.index
})
return data
except openai.OpenAIError as e:
logging.error(f"OpenAI API error: {e}")
retries += 1
if retries >= self.max_retries:
raise
time.sleep(RETRY_SLEEP_DURATION)
if f"{e}".startswith("Rate limit"):
print("Rate limit reached... sleeping for 20 seconds")
start_time+=20
time.sleep(20)
raise TimeoutError("API call timed out")
"""
import logging
import numpy as np
from typing import List, Tuple, Optional
from sklearn.metrics.pairwise import cosine_similarity
from integrations.openaiwrapper import OpenAIAPIWrapper
logger = logging.getLogger()
class Agent:
def __init__(self, purpose: str):
self.purpose = purpose
self.purpose_embedding=None
class AgentSimilarity:
def __init__(self, openai_wrapper: OpenAIAPIWrapper, agents: List[Agent]):
"""
Initializes the AgentSimilarity object.
:param openai_wrapper: Instance of OpenAIAPIWrapper to interact with OpenAI API.
:param agents: List of Agent objects.
"""
self.openai_wrapper = openai_wrapper
self.agents = agents
def get_embedding(self, text: str) -> np.ndarray:
"""
Retrieves the embedding for a given text.
:param text: Text to get embedding for.
:return: Embedding as a numpy array.
"""
try:
response = self.openai_wrapper.get_embedding(text)
<fim_suffix>
except Exception as e:
logger.exception(f"Error retrieving embedding: {e}")
raise ValueError(f"Error retrieving embedding: {e}")
def calculate_similarity_threshold(self) -> float:
"""
Calculates the 98th percentile of the similarity threshold across all agents.
:return: 98th percentile of similarity threshold.
"""
try:
embeddings=[]
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
embeddings.append(agent.purpose_embedding)
if len(embeddings) < 250:
return 0.999
similarities = [cosine_similarity([e1], [e2])[0][0] for i, e1 in enumerate(embeddings) for e2 in embeddings[i+1:]]
return np.percentile(similarities, 98) if similarities else 0.999
except Exception as e:
logger.exception(f"Error calculating similarity threshold: {e}")
raise ValueError(f"Error calculating similarity threshold: {e}")
def find_closest_agent(self, purpose_embedding: np.ndarray) -> Tuple[Optional[Agent], float]:
"""
Finds the closest agent based on the given purpose embedding.
:param purpose_embedding: The embedding of the purpose to find the closest agent for.
:return: Tuple of the closest agent and the highest similarity score.
"""
closest_agent: Optional[Agent] = None
highest_similarity: float = -np.inf
try:
for agent in self.agents:
if agent.purpose_embedding is None:
agent.purpose_embedding = self.get_embedding(agent.purpose)
similarity = cosine_similarity([agent.purpose_embedding], [purpose_embedding])[0][0]
if similarity > highest_similarity:
highest_similarity = similarity
closest_agent = agent
return closest_agent, highest_similarity
except Exception as e:
logger.exception(f"Error finding closest agent: {e}")
raise ValueError(f"Error finding closest agent: {e}")
<fim_middle>if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format") | if 'data' in response and len(response['data']) > 0 and 'embedding' in response['data'][0]:
return np.array(response['data'][0]['embedding'])
else:
logger.exception("Invalid response format")
raise ValueError("Invalid response format") | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/memoize.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/agents/parallel_agent_executor.py
def determine_winning_agent(self):
if not self.response_queue.empty():
return self.response_queue.get()
return None, None
# microagents/agents/microagent.py
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
# microagents/gradio_ui/agent_manager.py
def get_agents_info(self) -> List[dict]:
"""
Retrieve information about all agents for display in Gradio.
"""
agents = self.manager.get_agents()
agents_sorted = self.sort_agents(agents)
if not agents_sorted:
return []
return [self.format_agent_info(agent, agents_sorted) for agent in agents_sorted]
"""
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
<fim_suffix>
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle>if result is not None:
return result | if result is not None:
return result | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/agent_persistence_manager.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the persistence layer.
"""
raise NotImplementedError
# microagents/integrations/sqlite_agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]
# microagents/agents/agent_lifecycle.py
def cleanup_agents(self):
"""Remove all agents with status stopped = True in an efficient manner."""
self.agents = [agent for agent in self.agents if not agent.stopped]
"""
from agents.agent_serializer import AgentSerializer
from integrations.memoize import memoize_to_sqlite
from integrations.sqlite_agent_persistence import SQLiteAgentPersistence
class AgentPersistenceManager:
def __init__(self, db_filename="agents.db"):
self.persistence = SQLiteAgentPersistence(db_filename)
def remove_agent(self, agent):
"""
Remove an agent from the database.
"""
self.persistence.remove_agent(agent.id)
def save_agent(self, agent):
"""
Serialize and save the agent state if it is a working agent and not a prime agent.
"""
if agent.is_working_agent() and not agent.is_prime_agent():
serialized_agent = AgentSerializer.serialize(agent)
self.persistence.save_agent(serialized_agent)
def load_agent(self, purpose, agent_lifecycle, openai_wrapper):
"""
Load an agent with the given purpose from the database.
"""
serialized_agent = self.persistence.fetch_agent(purpose)
if serialized_agent:
return AgentSerializer.from_dict(serialized_agent, agent_lifecycle, openai_wrapper)
return None
def load_all_agents(self, agent_lifecycle, openai_wrapper):
"""
Load all agents from the database.
"""
purposes = self.persistence.load_all_purposes()
agents = []
for purpose in purposes:
agent = self.load_agent(purpose, agent_lifecycle, openai_wrapper)
<fim_suffix>
return agents
<fim_middle>if agent:
agents.append(agent) | if agent:
agents.append(agent) | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/agents/microagent.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/ui/format.py
def print_agent_statistics(agent):
"""
Print statistics for a given agent.
"""
print(f"π Stats for {agent.purpose}:")
stats = [
f"π Evolve Count: {agent.evolve_count}",
f"π» Code Executions: {agent.number_of_code_executions}",
f"π₯ Active Agents: {agent.active_agents}",
f"π Usage Count: {agent.usage_count}",
f"ποΈ Max Depth: {agent.max_depth}",
f"π Depth: {agent.depth}",
f"π οΈ Working Agent: {agent.working_agent}",
f"π Last Input: {agent.last_input}",
f"π¦ Status: {agent.current_status}",
f"{Fore.MAGENTA}\nPrompt for {agent.purpose}:{Style.RESET_ALL}",
f"{Fore.LIGHTMAGENTA_EX}{agent.dynamic_prompt}\n{Style.RESET_ALL}"
]
print('\n'.join(stats))
# microagents/ui/logic.py
def output_results(self):
self.app.rlog.write("\n\nFinal Results:\n")
for agent in self.manager.get_agents():
self.app.rlog.write(f"π Stats for {agent.purpose} :")
self.app.rlog.write(f"π Evolve Count: {agent.evolve_count}")
self.app.rlog.write(f"π» Code Executions: {agent.number_of_code_executions}")
self.app.rlog.write(f"π₯ Active Agents: {agent.active_agents}")
self.app.rlog.write(f"π Usage Count: {agent.usage_count}")
self.app.rlog.write(f"π Max Depth: {agent.max_depth}")
self.app.rlog.write(f"π Depth: {agent.depth}")
self.app.rlog.write(f"π Working Agent::{agent.working_agent}")
self.app.rlog.write(f"π Last Input: {agent.last_input}")
self.app.rlog.write(f"π¦ Status: {agent.current_status}")
self.app.rlog.write(f"\nPrompt for {agent.purpose}:")
self.app.rlog.write(f"{agent.dynamic_prompt}\n")
# microagents/gradio_ui/agent_manager.py
def format_agent_info_details(self, agent: MicroAgent) -> dict:
"""
Format the information of a MicroAgent for display.
"""
return {
"Purpose": agent.purpose,
"System Prompt": agent.dynamic_prompt,
"Last Input": agent.last_input,
"Last Output": agent.last_output,
"Last Conversation": agent.last_conversation,
}
"""
import logging
import uuid
from integrations.openaiwrapper import OpenAIAPIWrapper
from agents.agent_evaluation import AgentEvaluator
from agents.agent_response import AgentResponse
from agents.agent_similarity import AgentSimilarity
from agents.response_extraction import ResponseExtraction
from agents.agent_stopped_exception import AgentStoppedException
from agents.response_handler import ResponseHandler
from runtime.code_execution import CodeExecution
from prompt_management.prompt_evolution import PromptEvolution
from utils.utility import get_env_variable, time_function, log_exception
logger = logging.getLogger()
class MicroAgent:
"""
The MicroAgent class encapsulates the behavior of a small, purpose-driven agent
that interacts with the OpenAI API.
"""
def __init__(self, initial_prompt, purpose, depth, agent_lifecycle, openai_wrapper, max_depth=3, bootstrap_agent=False, is_prime=False, purpose_embedding=None, parent=None, parent_id=None, id=None) :
self.dynamic_prompt = initial_prompt
self.purpose = purpose
self.purpose_embedding = purpose_embedding
self.depth = depth
self.max_depth = max_depth
self.usage_count = 0
self.working_agent = bootstrap_agent
self.agent_lifecycle = agent_lifecycle
self.openai_wrapper = openai_wrapper
self.evolve_count = 0
self.number_of_code_executions = 0
self.current_status = None
self.active_agents = {}
self.last_input = ""
self.last_output = ""
self.last_conversation = ""
self.stopped = False
self.is_prime = is_prime
self.stop_execution = False
<fim_suffix>
if parent_id:
self.parent_id = parent_id
if is_prime:
self.id = "2a5e6fe9-1bb1-426c-9521-145caa2cf66b"
else:
if id:
self.id = id
else:
self.id = str(uuid.uuid4())
# Initialize components used by the agent
self.agent_evaluator = AgentEvaluator(self.openai_wrapper)
self.code_executor = CodeExecution()
self.agent_responder = AgentResponse(self.openai_wrapper, self.agent_lifecycle, self.code_executor, self, agent_lifecycle, depth)
self.agent_similarity = AgentSimilarity(self.openai_wrapper, self.agent_lifecycle.agents)
self.prompt_evolver = PromptEvolution(self.openai_wrapper, self.agent_lifecycle)
self.response_extractor = ResponseExtraction(self.openai_wrapper)
self.response_handler = ResponseHandler(self)
def update_status(self, status):
"""Update the agent's current status."""
self.check_for_stopped()
self.current_status = status
logger.info(f"Agent {self.purpose} status updated to: {status}")
def update_active_agents(self, calling_agent, called_agent=None):
"""Update the tree view of active agents."""
if called_agent:
self.active_agents[calling_agent] = called_agent
else:
self.active_agents.pop(calling_agent, None)
logger.info(f"Active agents updated: {self.active_agents}")
def set_agent_as_working(self):
"""Set the agent as a working agent."""
self.working_agent = True
self.agent_lifecycle.save_agent(self)
logger.info(f"Agent {self.purpose} set as working agent.")
def get_children(self):
"""Get the children of the agent."""
return [agent for agent in self.agent_lifecycle.agents if agent.parent_id == self.id]
def is_working_agent(self):
return self.working_agent
def set_agent_deleted(self):
"""Set the agent as deleted."""
self.working_agent = False
self.current_status = "β Deleted"
self.stopped = True
self.stop_execution = True
self.agent_lifecycle.remove_agent(self)
logger.info(f"Agent {self.purpose} set as deleted.")
def check_for_stopped(self):
"""Check if the agent has been stopped."""
if self.stop_execution:
self.current_status = "β Stopped"
if self.is_prime:
self.agent_lifecycle.reset_all_agents()
raise AgentStoppedException("Agent stopped.")
def respond(self, input_text, evolve_count=0):
"""
Generate a response to the given input text.
"""
return self.response_handler.respond(input_text, evolve_count)
def stop(self):
"""Stop the agent."""
self.stop_execution = True
if not self.is_working_agent():
self.stopped = True
def reset(self):
"""Reset the agent's stopped status."""
self.current_status = ""
self.stop_execution = False
def __eq__(self, other):
if not isinstance(other, MicroAgent):
return NotImplemented
return (self.dynamic_prompt, self.purpose) == (other.dynamic_prompt, other.purpose)
def __hash__(self):
return hash((self.dynamic_prompt, self.purpose))
<fim_middle>if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None | if parent:
self.parent_id = parent.id if parent else None
else:
self.parent_id = None | IF | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/memoize.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/sqlite_agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]
# microagents/integrations/sqlite_agent_persistence.py
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
# microagents/integrations/sqlite_agent_persistence.py
def save_agent(self, agent_dict):
"""
Save the serialized agent to an SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute(
# add id field
"REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
(agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
)
"""
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
<fim_suffix>
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle>def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped | def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs)
return wrapped | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/memoize.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/utils/utility.py
def time_function(func: T) -> T:
"""
Decorator to measure the execution time of a function.
Args:
func (Callable): The function to measure.
Returns:
Callable: A wrapper function that adds execution time measurement to the input function.
"""
def wrapper(*args, **kwargs) -> Any:
start_time = time.perf_counter()
result = func(*args, **kwargs)
end_time = time.perf_counter() - start_time
logger.info(f"Time taken by {func.__name__}: {end_time:.6f} seconds")
return result
return wrapper
# microagents/integrations/sqlite_agent_persistence.py
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
# microagents/utils/utility.py
def wrapper(*args, **kwargs) -> Any:
start_time = time.perf_counter()
result = func(*args, **kwargs)
end_time = time.perf_counter() - start_time
logger.info(f"Time taken by {func.__name__}: {end_time:.6f} seconds")
return result
"""
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
<fim_suffix>
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle>def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs) | def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs) | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/memoize.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/sqlite_agent_persistence.py
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]
# microagents/integrations/sqlite_agent_persistence.py
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
# microagents/integrations/sqlite_agent_persistence.py
def save_agent(self, agent_dict):
"""
Save the serialized agent to an SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute(
# add id field
"REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
(agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
)
"""
import sqlite3
import hashlib
import json
import functools
## Originally from https://www.kevinkatz.io/posts/memoize-to-sqlite
def memoize_to_sqlite(func_name: str, filename: str = "cache.db"):
"""
Memoization decorator that caches the output of a method in a SQLite
database.
"""
def decorator(func):
@functools.wraps(func)
<fim_suffix>
return wrapped
return decorator
class SQLiteMemoization:
def __init__(self, filename):
self.filename = filename
self.connection = None
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
self.connection = None
def _initialize_database(self):
self.connection.execute(
"CREATE TABLE IF NOT EXISTS cache (hash TEXT PRIMARY KEY, result TEXT)"
)
self.connection.execute(
"CREATE INDEX IF NOT EXISTS cache_ndx ON cache(hash)"
)
def fetch_or_compute(self, func, func_name, *args, **kwargs):
arg_hash = self._compute_hash(func_name, *args, **kwargs)
result = self._fetch_from_cache(arg_hash)
if result is not None:
return result
return self._compute_and_cache_result(func, arg_hash, *args, **kwargs)
def _compute_hash(self, func_name, *args, **kwargs):
data = f"{func_name}:{repr(args)}:{repr(kwargs)}".encode("utf-8")
return hashlib.sha256(data).hexdigest()
def _fetch_from_cache(self, arg_hash):
cursor = self.connection.cursor()
cursor.execute("SELECT result FROM cache WHERE hash = ?", (arg_hash,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def _compute_and_cache_result(self, func, arg_hash, *args, **kwargs):
result = func(*args, **kwargs)
self._cache_result(arg_hash, result)
return result
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()<fim_middle>def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs) | def wrapped(*args, **kwargs):
with SQLiteMemoization(filename) as memoizer:
return memoizer.fetch_or_compute(func, func_name, *args, **kwargs) | METHOD | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |
<filename>microagents/integrations/sqlite_agent_persistence.py<fim_prefix>""" Here are some snippets of code retrieved from other files in this repository that may help you:
# microagents/integrations/memoize.py
def _cache_result(self, arg_hash, result):
cursor = self.connection.cursor()
cursor.execute(
"INSERT INTO cache (hash, result) VALUES (?, ?)",
(arg_hash, json.dumps(result))
)
self.connection.commit()
# microagents/integrations/agent_persistence.py
def save_agent(self, agent_dict):
"""
Save the serialized agent to the persistence layer.
"""
raise NotImplementedError
# microagents/integrations/memoize.py
def __enter__(self):
self.connection = sqlite3.connect(self.filename)
self._initialize_database()
return self
"""
import sqlite3
import json
from integrations.agent_persistence import AbstractAgentPersistence
class SQLiteAgentPersistence(AbstractAgentPersistence):
def __init__(self, filename="agents.db"):
self.filename = filename
self._initialize_database()
def _initialize_database(self):
"""
Initialize the SQLite database with the required schema.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS agents (
id TEXT PRIMARY KEY,
purpose TEXT,
data TEXT
)
""")
def remove_agent(self, purpose):
"""
Remove an agent from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute("DELETE FROM agents WHERE id = ?", (purpose,))
def save_agent(self, agent_dict):
"""
Save the serialized agent to an SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
conn.execute(
<fim_suffix>
"REPLACE INTO agents (id, purpose, data) VALUES (?, ?, ?)",
(agent_dict['id'], agent_dict['purpose'], json.dumps(agent_dict))
)
def fetch_agent(self, purpose):
"""
Fetch a serialized agent based on its purpose from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT data FROM agents WHERE purpose = ?", (purpose,))
row = cursor.fetchone()
return json.loads(row[0]) if row else None
def load_all_purposes(self):
"""
Load all agent purposes from the SQLite database.
"""
with sqlite3.connect(self.filename) as conn:
cursor = conn.cursor()
cursor.execute("SELECT purpose FROM agents")
return [row[0] for row in cursor.fetchall()]<fim_middle># add id field | # add id field | LINE_COMMENT | prefix_suffix_full_complete_current_block_with_repo_rag_oracle |