              white_bkgd=config.white_bkgd),
          axis_name='batch')

  render_eval_pfn = jax.pmap(
      render_eval_fn,
      in_axes=(None, None, 0),  # Only distribute the data input.
      donate_argnums=(2,),
      axis_name='batch',
  )
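  # Note: in_axes=(None, None, 0) broadcasts the first two arguments to every
  # device and shards only the third (the ray batch) along its leading axis;
  # donate_argnums=(2,) additionally lets XLA reuse the ray buffers for the
  # outputs.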
  ssim_fn = jax.jit(functools.partial(math.compute_ssim, max_val=1.))

  if not utils.isdir(FLAGS.train_dir):
    utils.makedirs(FLAGS.train_dir)
  state = checkpoints.restore_checkpoint(FLAGS.train_dir, state)
  # Resume training at the step of the last checkpoint.
  init_step = state.optimizer.state.step + 1
  state = flax.jax_utils.replicate(state)
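  # `flax.jax_utils.replicate` stacks a copy of the restored state along a new
  # leading device axis, one copy per local device, which is what the pmapped
  # train step below expects.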
  if jax.host_id() == 0:
    summary_writer = tensorboard.SummaryWriter(FLAGS.train_dir)

  # Prefetch_buffer_size = 3 x batch_size
  pdataset = flax.jax_utils.prefetch_to_device(dataset, 3)
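  # Keeping up to three batches staged on the accelerators lets host-to-device
  # transfer overlap with the training computation.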
  rng = rng + jax.host_id()  # Make random seed separate across hosts.
  keys = random.split(rng, jax.local_device_count())  # For pmapping RNG keys.
  gc.disable()  # Disable automatic garbage collection for efficiency.
  stats_trace = []
  reset_timer = True
  for step, batch in zip(range(init_step, config.max_steps + 1), pdataset):
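    # `zip` pairs each step index with the next prefetched batch; iteration is
    # bounded by config.max_steps since the dataset iterator is assumed to be
    # effectively endless.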
    if reset_timer:
      t_loop_start = time.time()
      reset_timer = False
    lr = learning_rate_fn(step)
    state, stats, keys = train_pstep(keys, state, batch, lr)
    if jax.host_id() == 0:
      stats_trace.append(stats)
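    # Automatic GC was disabled above, so collect manually every
    # config.gc_every steps to keep pauses out of the steps in between.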
    if step % config.gc_every == 0:
      gc.collect()

    # Log training summaries. This is put behind a host_id check because in
    # multi-host evaluation, all hosts need to run inference even though we
    # only use host 0 to record results.
    if jax.host_id() == 0:
      if step % config.print_every == 0:
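        # `stats` still carries the leading device axis added by the pmapped
        # train step; its per-device entries are assumed to already be averaged
        # across devices, so indexing with [0] just takes one copy.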
        summary_writer.scalar('num_params', num_params, step)
        summary_writer.scalar('train_loss', stats.loss[0], step)
        summary_writer.scalar('train_psnr', stats.psnr[0], step)
        for i, l in enumerate(stats.losses[0]):
          summary_writer.scalar(f'train_losses_{i}', l, step)
        for i, p in enumerate(stats.psnrs[0]):
          summary_writer.scalar(f'train_psnrs_{i}', p, step)
        summary_writer.scalar('weight_l2', stats.weight_l2[0], step)
        avg_loss = np.mean(np.concatenate([s.loss for s in stats_trace]))
        avg_psnr = np.mean(np.concatenate([s.psnr for s in stats_trace]))
        max_grad_norm = np.max(
            np.concatenate([s.grad_norm for s in stats_trace]))
        avg_grad_norm = np.mean(
            np.concatenate([s.grad_norm for s in stats_trace]))
        max_clipped_grad_norm = np.max(
            np.concatenate([s.grad_norm_clipped for s in stats_trace]))
        max_grad_max = np.max(
            np.concatenate([s.grad_abs_max for s in stats_trace]))
        stats_trace = []
        summary_writer.scalar('train_avg_loss', avg_loss, step)
        summary_writer.scalar('train_avg_psnr', avg_psnr, step)
        summary_writer.scalar('train_max_grad_norm', max_grad_norm, step)
        summary_writer.scalar('train_avg_grad_norm', avg_grad_norm, step)
        summary_writer.scalar('train_max_clipped_grad_norm',
                              max_clipped_grad_norm, step)
        summary_writer.scalar('train_max_grad_max', max_grad_max, step)
        summary_writer.scalar('learning_rate', lr, step)
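        # Throughput is measured over the last `print_every` steps; the timer
        # is re-armed below so each logged window starts fresh.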
        steps_per_sec = config.print_every / (time.time() - t_loop_start)
        reset_timer = True
        rays_per_sec = config.batch_size * steps_per_sec
        summary_writer.scalar('train_steps_per_sec', steps_per_sec, step)
        summary_writer.scalar('train_rays_per_sec', rays_per_sec, step)
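        # `precision` is the field width needed to print config.max_steps, so
        # successive step counters stay aligned in the console output.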
        precision = int(np.ceil(np.log10(config.max_steps))) + 1
        print(('{:' + '{:d}'.format(precision) + 'd}').format(step) +
              f'/{config.max_steps:d}: ' + f'i_loss={stats.loss[0]:0.4f}, ' +
              f'avg_loss={avg_loss:0.4f}, ' +
              f'weight_l2={stats.weight_l2[0]:0.2e}, ' + f'lr={lr:0.2e}, ' +
              f'{rays_per_sec:0.0f} rays/sec')
      if step % config.save_every == 0:
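        # Un-replicate: take the first device's copy of the training state and
        # pull it to host memory before writing the checkpoint.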
        state_to_save = jax.device_get(jax.tree_map(lambda x: x[0], state))
        checkpoints.save_checkpoint(
            FLAGS.train_dir, state_to_save, int(step), keep=100)

    # Test-set evaluation.
    if FLAGS.render_every > 0 and step % FLAGS.render_every == 0:
      # We reuse the same random number generator from the optimization step
      # here on purpose so that the visualization matches what happened in
      # training.
      t_eval_start = time.time()
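      # Fetch an un-replicated copy of the current parameters to the host for
      # rendering the held-out view.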
      eval_variables = jax.device_get(jax.tree_map(lambda x: x[0],
                                                   state)).optimizer.target
      test_case = next(test_dataset)
      pred_color, pred_distance, pred_acc = models.render_image(
          functools.partial(render_eval_pfn, eval_variables),
          test_case['rays'],