Commit: chore: refactor src
SauravMaheshkar committed • 1d4cc3a • Parent(s): 65947b1

Files changed:
- app.py +16 -5
- requirements.txt +3 -1
- src/__init__.py +0 -0
- augmentations.py → src/augmentations.py +0 -0
- models.py → src/models.py +1 -1
- utils.py → src/utils.py +0 -1

app.py
CHANGED
@@ -1,16 +1,21 @@
 import gradio as gr
 import torch
 
-from augmentations import get_videomae_transform
-from models import load_model
-from utils import create_plot, get_frames, get_videomae_outputs, prepare_frames_masks
+from src.augmentations import get_videomae_transform
+from src.models import load_model
+from src.utils import (
+    create_plot,
+    get_frames,
+    get_videomae_outputs,
+    prepare_frames_masks,
+)
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-transform = get_videomae_transform()
-
 
 def get_visualisations(mask_ratio, video_path):
+    transform = get_videomae_transform()
+
     frames, ids = get_frames(path=video_path, transform=transform)
 
     model, masks, patch_size = load_model(
@@ -36,6 +41,12 @@ def get_visualisations(mask_ratio, video_path):
 
 
 with gr.Blocks() as app:
+    gr.Markdown(
+        """
+        # VideoMAE Reconstruction Demo
+        To read more about the Self-Supervised Learning techniques for video please refer to the [Lightly AI blogpost on Self-Supervised Learning for Videos](www.lightly.ai/post/self-supervised-learning-for-videos).
+        """  # noqa: E501
+    )
     video = gr.Video(
         value="assets/example.mp4",
     )
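
Note: the hunks above show only the top of get_visualisations and the opening of the Blocks UI. As a rough sketch of how such an app is typically wired together (the slider, button label, and gr.Plot output below are assumptions, not part of this commit):

import gradio as gr

with gr.Blocks() as app:
    video = gr.Video(value="assets/example.mp4")
    # Assumed controls: a slider feeding mask_ratio and a button that
    # triggers the reconstruction figure returned by get_visualisations.
    mask_ratio = gr.Slider(minimum=0.5, maximum=0.95, value=0.75, label="Mask ratio")
    plot = gr.Plot()
    gr.Button("Visualise").click(fn=get_visualisations, inputs=[mask_ratio, video], outputs=plot)

app.launch()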

requirements.txt
CHANGED
@@ -1,6 +1,8 @@
+eva-decord
 einops
-
+gradio
 numpy
+Pillow
 timm
 torch
 torchvision
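
Note: eva-decord is a maintained fork of decord (video decoding) that ships prebuilt wheels and still installs under the decord import name; it is presumably what get_frames uses to read the clip. A minimal sketch of that kind of frame sampling (the sampling stride is an assumption):

from decord import VideoReader, cpu

vr = VideoReader("assets/example.mp4", ctx=cpu(0))  # decode on the CPU
indices = list(range(0, len(vr), 4))                # sample every 4th frame (assumed stride)
frames = vr.get_batch(indices).asnumpy()            # (T, H, W, C) uint8 array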

src/__init__.py
ADDED
Empty file

augmentations.py → src/augmentations.py
RENAMED
File without changes

models.py → src/models.py
RENAMED
@@ -8,7 +8,7 @@ import torch.nn.functional as F
 import torch.utils.checkpoint as checkpoint
 from timm.models.layers import drop_path, to_2tuple, trunc_normal_
 
-from augmentations import TubeMaskingGenerator
+from src.augmentations import TubeMaskingGenerator
 
 __all__ = ["load_model"]
 
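
Note: TubeMaskingGenerator (imported here from src.augmentations) implements VideoMAE-style tube masking: one random spatial mask is drawn and repeated across every temporal position, so a masked patch stays masked for the whole clip. A minimal sketch of the idea, not the repository's exact class:

import numpy as np

class TubeMaskingSketch:
    def __init__(self, input_size, mask_ratio):
        # input_size: (temporal positions, height in patches, width in patches)
        self.frames, self.height, self.width = input_size
        self.patches_per_frame = self.height * self.width
        self.masks_per_frame = int(mask_ratio * self.patches_per_frame)

    def __call__(self):
        # One random spatial mask, tiled over time -> "tubes".
        mask_per_frame = np.hstack([
            np.zeros(self.patches_per_frame - self.masks_per_frame),
            np.ones(self.masks_per_frame),
        ])
        np.random.shuffle(mask_per_frame)
        return np.tile(mask_per_frame, (self.frames, 1)).flatten()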

utils.py → src/utils.py
RENAMED
@@ -139,6 +139,5 @@ def create_plot(images):
         axes[i, j].set_title(column_names[j], fontsize=16)
 
     plt.tight_layout()
-    plt.show()
 
     return fig
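
Note: dropping plt.show() fits the refactor: create_plot already returns the Figure, and the Gradio component renders the returned object itself, while plt.show() would try to open a window on a headless server. A minimal sketch of the pattern (the component wiring is an assumption):

import matplotlib
matplotlib.use("Agg")  # headless backend: render figures without a display

import gradio as gr
import matplotlib.pyplot as plt


def make_figure():
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    return fig  # return the Figure instead of calling plt.show()


with gr.Blocks() as demo:
    out = gr.Plot()
    gr.Button("Draw").click(fn=make_figure, outputs=out)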