Update app.py
app.py CHANGED
@@ -43,7 +43,7 @@ def process_images(videos, x, y):
     return [flow, activation, attention]
 
 
-title = "Modelling Human Visual Motion Processing with Trainable Motion Energy Sensing and a Self-attention Network "
+title = "Modelling Human Visual Motion Processing with Trainable Motion Energy Sensing and a Self-attention Network 🤗 "
 description = "## Introduction 🔥🔥🔥\n" \
 " The intersection of cognitive neuroscience and computer vision offers exciting advancements in " \
 "how machines perceive motion. Our research bridges the gap between these fields by proposing a novel " \
@@ -53,7 +53,7 @@ description = "## Introduction 🔥🔥🔥\n" \
 " physiological responses in V1 and MT neurons but also replicates human psychophysical responses " \
 "to dynamic stimuli. \n\n\n" \
 "![](https://drive.google.com/uc?id=10PcKzQ9X1nsXKUi8OPR0jN_ZsjlCAV47) \n" \
-"## Environment Configuration \n" \
+"## Environment Configuration 💡 \n" \
 "To run our model, the following basic environment configuration is required:\n" \
 '- Python 3.8 or higher \n' \
 '- PyTorch 2.0 \n' \
@@ -61,9 +61,9 @@ description = "## Introduction 🔥🔥🔥\n" \
 '- opencv-python \n' \
 '- Imageio \n' \
 '- Matplotlib \n\n' \
-"## Preprint Paper \n" \
+"## Preprint Paper 📄 \n" \
 "The paper is available at [arXiv](https://arxiv.org/abs/2305.09156) \n" \
-"## Video Presentation \n" \
+"## Video Presentation 📹 \n" \
 "The video presentation is available at [Video Record](https://recorder-v3.slideslive.com/?share=85662&s=6afe157c-e764-4e3c-9302-2c6dd6887db1/). \n" \
 "## Conference Website \n" \
 "The project is presented at [NeurIPS 2023](https://neurips.cc/virtual/2023/poster/70202). \n" \
@@ -76,9 +76,11 @@ examples = [["example_1.mp4", 62, 56], ["example_2.mp4", 59, 55], ["example_3.mp
 # examples = [["example_1.mp4", 62, 56]]
 md = "![](https://drive.google.com/uc?id=1WBqYsKRwn_78A72MJBrk643l3-gfAssP) \n" \
 "## Author \n" \
-"This project page is developed by Zitang Sun (zitangsun96 @ gmail.com)\n" \
+"This project page is developed by Zitang Sun 📧 (zitangsun96 @ gmail.com)\n" \
 "## LICENSE \n" \
-"This project is licensed under the terms of the MIT license. \n"
+"This project is licensed under the terms of the MIT license. \n" \
+"## Address \n" \
+"Kyoto University, Japan\n"
 
 if __name__ =='__main__':
     # torch.cuda.init()
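One detail worth spelling out in the last hunk: `description` and `md` are built from adjacent string literals joined by backslash line continuations, so every line except the final one must end with `\`. Without the continuation after the LICENSE line, the two new Address literals would form a separate, silently discarded expression statement instead of extending `md`. A minimal sketch of the pattern:

```python
# Adjacent string literals are concatenated at compile time; the
# trailing backslash only continues the logical line.
md = "## Author \n" \
     "This project page is developed by Zitang Sun\n" \
     "## Address \n" \
     "Kyoto University, Japan\n"

# A parenthesized literal is equivalent and harder to break:
md = ("## Author \n"
      "This project page is developed by Zitang Sun\n"
      "## Address \n"
      "Kyoto University, Japan\n")
```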
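Finally, for context on where `title`, `description`, `examples`, and `md` end up: they are presumably fed to a Gradio interface wrapped around `process_images`. The sketch below is hypothetical wiring, not the layout actually in `app.py`; the component types and labels are assumptions, since the diff only shows that `process_images` takes a video plus `x` and `y` and returns `[flow, activation, attention]`.

```python
import gradio as gr

# Hypothetical wiring -- the real component layout is not part of this diff.
demo = gr.Interface(
    fn=process_images,                      # defined earlier in app.py
    inputs=[gr.Video(),                     # stimulus video
            gr.Number(label="x"),           # numeric inputs; meaning assumed
            gr.Number(label="y")],
    outputs=[gr.Video(label="flow"),        # predicted motion/flow
             gr.Image(label="activation"),  # motion-energy activation
             gr.Image(label="attention")],  # self-attention map
    title=title,
    description=description,
    article=md,          # Markdown rendered below the interface
    examples=examples,   # e.g. ["example_1.mp4", 62, 56]
)

if __name__ == '__main__':
    demo.launch()
```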
|