Commit a0ec4db by kleytondacosta
Parent(s): af13c8e

Update index.html

Files changed: index.html (+4, -175)

index.html  CHANGED
@@ -97,7 +97,6 @@
 <span>Leaderboard</span>
 </a>
 </div>
-
 </div>
 </div>
 </div>
@@ -117,64 +116,6 @@
 </section>
 
 
-<section class="hero is-light is-small">
-<div class="hero-body">
-<div class="container">
-<div id="results-carousel" class="carousel results-carousel">
-<div class="item item-steve">
-<video poster="" id="steve" autoplay controls muted loop playsinline height="100%">
-<source src="./static/videos/steve.mp4"
-type="video/mp4">
-</video>
-</div>
-<div class="item item-chair-tp">
-<video poster="" id="chair-tp" autoplay controls muted loop playsinline height="100%">
-<source src="./static/videos/chair-tp.mp4"
-type="video/mp4">
-</video>
-</div>
-<div class="item item-shiba">
-<video poster="" id="shiba" autoplay controls muted loop playsinline height="100%">
-<source src="./static/videos/shiba.mp4"
-type="video/mp4">
-</video>
-</div>
-<div class="item item-fullbody">
-<video poster="" id="fullbody" autoplay controls muted loop playsinline height="100%">
-<source src="./static/videos/fullbody.mp4"
-type="video/mp4">
-</video>
-</div>
-<div class="item item-blueshirt">
-<video poster="" id="blueshirt" autoplay controls muted loop playsinline height="100%">
-<source src="./static/videos/blueshirt.mp4"
-type="video/mp4">
-</video>
-</div>
-<div class="item item-mask">
-<video poster="" id="mask" autoplay controls muted loop playsinline height="100%">
-<source src="./static/videos/mask.mp4"
-type="video/mp4">
-</video>
-</div>
-<div class="item item-coffee">
-<video poster="" id="coffee" autoplay controls muted loop playsinline height="100%">
-<source src="./static/videos/coffee.mp4"
-type="video/mp4">
-</video>
-</div>
-<div class="item item-toby">
-<video poster="" id="toby" autoplay controls muted loop playsinline height="100%">
-<source src="./static/videos/toby2.mp4"
-type="video/mp4">
-</video>
-</div>
-</div>
-</div>
-</div>
-</section>
-
-
 <section class="section">
 <div class="container is-max-desktop">
 <!-- Abstract. -->
@@ -220,118 +161,6 @@
 </div>
 <!--/ Visual Effects. -->
 
-<!-- Matting. -->
-<div class="column">
-<h2 class="title is-3">Matting</h2>
-<div class="columns is-centered">
-<div class="column content">
-<p>
-As a byproduct of our method, we can also solve the matting problem by ignoring
-samples that fall outside of a bounding box during rendering.
-</p>
-<video id="matting-video" controls playsinline height="100%">
-<source src="./static/videos/matting.mp4"
-type="video/mp4">
-</video>
-</div>
-
-</div>
-</div>
-</div>
-<!--/ Matting. -->
-
-<!-- Animation. -->
-<div class="columns is-centered">
-<div class="column is-full-width">
-<h2 class="title is-3">Animation</h2>
-
-<!-- Interpolating. -->
-<h3 class="title is-4">Interpolating states</h3>
-<div class="content has-text-justified">
-<p>
-We can also animate the scene by interpolating the deformation latent codes of two input
-frames. Use the slider here to linearly interpolate between the left frame and the right
-frame.
-</p>
-</div>
-<div class="columns is-vcentered interpolation-panel">
-<div class="column is-3 has-text-centered">
-<img src="./static/images/interpolate_start.jpg"
-class="interpolation-image"
-alt="Interpolate start reference image."/>
-<p>Start Frame</p>
-</div>
-<div class="column interpolation-video-column">
-<div id="interpolation-image-wrapper">
-Loading...
-</div>
-<input class="slider is-fullwidth is-large is-info"
-id="interpolation-slider"
-step="1" min="0" max="100" value="0" type="range">
-</div>
-<div class="column is-3 has-text-centered">
-<img src="./static/images/interpolate_end.jpg"
-class="interpolation-image"
-alt="Interpolation end reference image."/>
-<p class="is-bold">End Frame</p>
-</div>
-</div>
-<br/>
-<!--/ Interpolating. -->
-
-<!-- Re-rendering. -->
-<h3 class="title is-4">Re-rendering the input video</h3>
-<div class="content has-text-justified">
-<p>
-Using <span class="dnerf">Nerfies</span>, you can re-render a video from a novel
-viewpoint such as a stabilized camera by playing back the training deformations.
-</p>
-</div>
-<div class="content has-text-centered">
-<video id="replay-video"
-controls
-muted
-preload
-playsinline
-width="75%">
-<source src="./static/videos/replay.mp4"
-type="video/mp4">
-</video>
-</div>
-<!--/ Re-rendering. -->
-
-</div>
-</div>
-<!--/ Animation. -->
-
-
-<!-- Concurrent Work. -->
-<div class="columns is-centered">
-<div class="column is-full-width">
-<h2 class="title is-3">Related Links</h2>
-
-<div class="content has-text-justified">
-<p>
-There's a lot of excellent work that was introduced around the same time as ours.
-</p>
-<p>
-<a href="https://arxiv.org/abs/2104.09125" target="_blank">Progressive Encoding for Neural Optimization</a> introduces an idea similar to our windowed position encoding for coarse-to-fine optimization.
-</p>
-<p>
-<a href="https://www.albertpumarola.com/research/D-NeRF/index.html" target="_blank">D-NeRF</a> and <a href="https://gvv.mpi-inf.mpg.de/projects/nonrigid_nerf/" target="_blank">NR-NeRF</a>
-both use deformation fields to model non-rigid scenes.
-</p>
-<p>
-Some works model videos with a NeRF by directly modulating the density, such as <a href="https://video-nerf.github.io/" target="_blank">Video-NeRF</a>, <a href="https://www.cs.cornell.edu/~zl548/NSFF/" target="_blank">NSFF</a>, and <a href="https://neural-3d-video.github.io/" target="_blank">DyNeRF</a>
-</p>
-<p>
-There are probably many more by the time you are reading this. Check out <a href="https://dellaert.github.io/NeRF/" target="_blank">Frank Dellart's survey on recent NeRF papers</a>, and <a href="https://github.com/yenchenlin/awesome-NeRF" target="_blank">Yen-Chen Lin's curated list of NeRF papers</a>.
-</p>
-</div>
-</div>
-</div>
-<!--/ Concurrent Work. -->
-
 </div>
 </section>
 
@@ -339,11 +168,11 @@
 <section class="section" id="BibTeX">
 <div class="container is-max-desktop content">
 <h2 class="title">BibTeX</h2>
-<pre><code>@article{
-author = {
-title = {
+<pre><code>@article{dacosta2025bmbench,
+author = {da Costa, K., Munoz, C., Modenesi, B., Fernandez, F., Koshiyama, A.},
+title = {BMBENCH: Empirical Benchmarking of Algorithmic Fairness in Machine Learning Models},
 journal = {ICCV},
-year = {
+year = {2025},
 }</code></pre>
 </div>
 </section>
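For reference, a minimal sketch of how the BibTeX section reads once this hunk is applied, assembled from the context and "+" lines above; indentation is illustrative and not taken from the full file:

<section class="section" id="BibTeX">
  <div class="container is-max-desktop content">
    <h2 class="title">BibTeX</h2>
    <pre><code>@article{dacosta2025bmbench,
  author = {da Costa, K., Munoz, C., Modenesi, B., Fernandez, F., Koshiyama, A.},
  title = {BMBENCH: Empirical Benchmarking of Algorithmic Fairness in Machine Learning Models},
  journal = {ICCV},
  year = {2025},
}</code></pre>
  </div>
</section>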