    # Down-weight the coarse-level losses by coarse_loss_mult, keep the
    # finest-level loss at full weight, and add the weight_l2 regularization.
    loss = (
        config.coarse_loss_mult * jnp.sum(losses[:-1]) + losses[-1] + weight_l2)
    stats = utils.Stats(
        loss=loss,
        losses=losses,
        weight_l2=weight_l2,
        psnr=0.0,
        psnrs=0.0,
        grad_norm=0.0,
        grad_abs_max=0.0,
        grad_norm_clipped=0.0,
    )
    return loss, stats
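
  # Differentiate the loss w.r.t. the model parameters, then average the
  # gradients and per-step statistics across devices.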
  (_, stats), grad = (
      jax.value_and_grad(loss_fn, has_aux=True)(state.optimizer.target))
  grad = jax.lax.pmean(grad, axis_name='batch')
  stats = jax.lax.pmean(stats, axis_name='batch')
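
  # L2 norm over every leaf in a gradient pytree.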
  def tree_norm(tree):
    return jnp.sqrt(
        jax.tree_util.tree_reduce(
            lambda x, y: x + jnp.sum(y**2), tree, initializer=0))
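
  # Optionally clip each gradient entry to [-grad_max_val, grad_max_val].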
  if config.grad_max_val > 0:
    clip_fn = lambda z: jnp.clip(z, -config.grad_max_val, config.grad_max_val)
    grad = jax.tree_util.tree_map(clip_fn, grad)

  grad_abs_max = jax.tree_util.tree_reduce(
      lambda x, y: jnp.maximum(x, jnp.max(jnp.abs(y))), grad, initializer=0)
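
  # Optionally rescale the gradient so that its global norm is at most
  # grad_max_norm.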
  grad_norm = tree_norm(grad)
  if config.grad_max_norm > 0:
    mult = jnp.minimum(1, config.grad_max_norm / (1e-7 + grad_norm))
    grad = jax.tree_util.tree_map(lambda z: mult * z, grad)
  grad_norm_clipped = tree_norm(grad)

  new_optimizer = state.optimizer.apply_gradient(grad, learning_rate=lr)
  new_state = state.replace(optimizer=new_optimizer)
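
  # Rebuild the stats with PSNRs and gradient diagnostics filled in.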
  psnrs = math.mse_to_psnr(stats.losses)
  stats = utils.Stats(
      loss=stats.loss,
      losses=stats.losses,
      weight_l2=stats.weight_l2,
      psnr=psnrs[-1],
      psnrs=psnrs,
      grad_norm=grad_norm,
      grad_abs_max=grad_abs_max,
      grad_norm_clipped=grad_norm_clipped,
  )

  return new_state, stats, rng


def main(unused_argv):
  rng = random.PRNGKey(20200823)
  # Shift the numpy random seed by host_id() to shuffle data loaded by
  # different hosts.
  np.random.seed(20201473 + jax.host_id())

  config = utils.load_config()

  if config.batch_size % jax.device_count() != 0:
    raise ValueError('Batch size must be divisible by the number of devices.')

  dataset = datasets.get_dataset('train', FLAGS.data_dir, config)
  test_dataset = datasets.get_dataset('test', FLAGS.data_dir, config)
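
  # Build the model and its initial variables from an example batch, then
  # count the trainable parameters.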
  rng, key = random.split(rng)
  model, variables = models.construct_mipnerf(key, dataset.peek())
  num_params = jax.tree_util.tree_reduce(
      lambda x, y: x + jnp.prod(jnp.array(y.shape)), variables, initializer=0)
  print(f'Number of parameters being optimized: {num_params}')

  optimizer = flax.optim.Adam(config.lr_init).create(variables)
  state = utils.TrainState(optimizer=optimizer)
  del optimizer, variables
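
  # Learning-rate schedule: decays from lr_init to lr_final over max_steps,
  # with an optional warmup controlled by lr_delay_steps and lr_delay_mult.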
  learning_rate_fn = functools.partial(
      math.learning_rate_decay,
      lr_init=config.lr_init,
      lr_final=config.lr_final,
      max_steps=config.max_steps,
      lr_delay_steps=config.lr_delay_steps,
      lr_delay_mult=config.lr_delay_mult)
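
  # Parallelize the training step across devices: per-device arguments are
  # mapped over their leading axis, the learning rate (in_axes=None) is
  # broadcast, and argument 2 is donated so XLA can reuse its device buffers.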
  train_pstep = jax.pmap(
      functools.partial(train_step, model, config),
      axis_name='batch',
      in_axes=(0, 0, 0, None),
      donate_argnums=(2,))

  # Because this is only used for test set rendering, we disable randomization.
  def render_eval_fn(variables, _, rays):
    return jax.lax.all_gather(
        model.apply(
            variables,
            random.PRNGKey(0),  # Unused.
            rays,
            randomized=False,