sunana committed
Commit d0d1f8c
1 Parent(s): 5c8ad42

Update app.py

Files changed (1):
  1. app.py +9 -4
app.py CHANGED
@@ -46,16 +46,21 @@ def process_images(videos, x, y):
 title = "Modelling Human Visual Motion Processing with Trainable Motion Energy Sensing and a Self-attention Network 🤗 "
 description = "## Introduction 🔥🔥🔥\n" \
     " The intersection of cognitive neuroscience and computer vision offers exciting advancements in " \
-    "how machines perceive motion. Our research bridges the gap between these fields by proposing a novel " \
+    "how machines perceive motion like humans. Our research bridges the gap between these fields by proposing a novel " \
     "image-computable model that aligns with human motion perception mechanisms. By integrating trainable" \
     " motion energy sensing with recurrent self-attention networks, we can simulate the complex motion " \
     "processing of the human visual cortex, particularly the V1-MT pathway. Our model not only parallels" \
     " physiological responses in V1 and MT neurons but also replicates human psychophysical responses " \
     "to dynamic stimuli. \n\n\n" \
+    "認知神経科学とコンピュータビジョンの交差点は、機械が人間のように動きを認識する方法において、興味深い進歩を提供します。" \
+    "私たちの研究は、これらの分野間のギャップを埋めるために、人間の動作知覚メカニズムに合致する新しい画像計算可能なモデルを提案しています。" \
+    "訓練可能な動きエネルギー感知をリカレント自己注意ネットワークと統合することにより、" \
+    "特にV1-MT経路における人間の視覚皮質の複雑な動き処理をシミュレートすることができます。" \
+    "私たちのモデルは、V1およびMTニューロンでの生理的反応と並行して、動的刺激に対する人間の心理物理学的反応も再現します。" \
     "![](https://drive.google.com/uc?id=10PcKzQ9X1nsXKUi8OPR0jN_ZsjlCAV47) \n" \
     "## Environment Configuration 🐡 \n" \
     "To run our model, the following basic environment configuration is required:\n" \
-    '- gradio == 4.7.1' \
+    '- gradio == 4.7.1 \n' \
     '- Python 3.8 or higher \n' \
     '- PyTorch 2.0 \n' \
     '- CUDA Toolkit 11.x (for GPU acceleration)\n' \
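
The dependency list in this hunk pins gradio to 4.7.1 and otherwise names Python 3.8+, PyTorch 2.0, and CUDA 11.x. For convenience, a minimal requirements.txt sketch matching those strings; the repository's actual requirements file is not shown in this commit, and the torch pin is an assumption:

# requirements.txt (sketch only; versions copied from the description strings above)
gradio==4.7.1
torch>=2.0    # assumed pin; the diff only says "PyTorch 2.0"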
@@ -108,7 +113,7 @@ if __name__ == '__main__':
     print('Number of parameters: {}'.format(model.num_parameters()))
     model.to(device)
     model_dict = torch.load('Model_example.pth.tar', map_location="cpu")['state_dict']
-    # save model
+    # load an example model
     model.load_state_dict(model_dict, strict=True)
     model.eval()

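For readers unfamiliar with the checkpoint pattern this hunk re-comments: app.py stores the weights under a 'state_dict' key, maps them to CPU at load time, and loads them strictly so any key mismatch fails loudly. A minimal, self-contained sketch of the same round trip, with a hypothetical TinyModel standing in for the real model class (which this diff does not show):

import torch
import torch.nn as nn

class TinyModel(nn.Module):
    """Hypothetical stand-in for the model in app.py."""
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv(x)

model = TinyModel()

# Save a checkpoint in the layout app.py expects: a dict with a 'state_dict' key.
torch.save({'state_dict': model.state_dict()}, 'Model_example.pth.tar')

# Load it back the way the hunk does: onto CPU first, then strictly into the model.
model_dict = torch.load('Model_example.pth.tar', map_location="cpu")['state_dict']
model.load_state_dict(model_dict, strict=True)  # strict=True raises on missing/unexpected keys
model.eval()  # switch dropout/batch-norm layers to inference behaviour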
@@ -118,7 +123,7 @@ if __name__ == '__main__':
         gr.Slider(0, 100, label='Y location of attention visualizer')],
     # output is three images
     outputs=[gr.Image(type="numpy", label="Motion flow field"),
-             gr.Image(type="numpy", label="Activation of Stage I"),
+             gr.Image(type="numpy", label="Neural Activation of Stage I"),
              gr.Image(type="numpy", label="Attention map of Stage II")],
     title=title,
     description=description,
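
The relabelled outputs belong to a gr.Interface call that this diff only partially shows. A minimal sketch of how the pieces presumably fit together, with a hypothetical process_images stub; the video input and the X slider are assumptions inferred from the process_images(videos, x, y) signature in the first hunk header:

import gradio as gr
import numpy as np

def process_images(video, x, y):
    """Hypothetical stub; the real function runs the motion model on the video."""
    blank = np.zeros((64, 64, 3), dtype=np.uint8)
    return blank, blank, blank

demo = gr.Interface(
    fn=process_images,
    inputs=[gr.Video(label="Input video"),  # assumed input component
            gr.Slider(0, 100, label='X location of attention visualizer'),  # assumed, by symmetry with Y
            gr.Slider(0, 100, label='Y location of attention visualizer')],
    outputs=[gr.Image(type="numpy", label="Motion flow field"),
             gr.Image(type="numpy", label="Neural Activation of Stage I"),
             gr.Image(type="numpy", label="Attention map of Stage II")],
    title="...",        # the real title string appears in the first hunk
    description="...",  # likewise for the description string
)

if __name__ == '__main__':
    demo.launch()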
 