Jiaqi-hkust committed on
Commit
a56aadc
·
verified ·
1 Parent(s): 6be2c5b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -3
app.py CHANGED
@@ -6,10 +6,16 @@ python demo_video.py \
6
  --model_type llama_v2 \
7
  --gpu-id 0
8
  """
9
-
10
  import argparse
11
  import os
12
  import random
 
 
 
 
 
 
13
 
14
  import numpy as np
15
  import torch
@@ -36,7 +42,7 @@ import time
36
  def parse_args():
37
  parser = argparse.ArgumentParser(description="Demo")
38
  parser.add_argument("--cfg-path", required=False, default='./configs/eval_configs/eval.yaml', help="path to configuration file.")
39
- parser.add_argument("--gpu-id", type=int, default=6, help="specify the gpu to load the model.")
40
  parser.add_argument("--model_type", type=str, default='llama_v2', help="The type of LLM")
41
  parser.add_argument(
42
  "--options",
@@ -82,6 +88,7 @@ print('Initialization Finished')
82
  # Gradio Setting
83
  # ========================================
84
 
 
85
  def gradio_reset(chat_state, img_list):
86
  if chat_state is not None:
87
  chat_state.messages = []
@@ -89,6 +96,7 @@ def gradio_reset(chat_state, img_list):
89
  img_list = []
90
  return None, gr.update(value=None, interactive=True), gr.update(interactive=False),gr.update(value="Upload & Start Chat", interactive=True), chat_state, img_list
91
 
 
92
  def upload_imgorvideo(gr_video, text_input, chat_state, chatbot):
93
  # if args.model_type == 'vicuna':
94
  # chat_state = default_conversation.copy()
@@ -114,6 +122,7 @@ def upload_imgorvideo(gr_video, text_input, chat_state, chatbot):
114
  # # img_list = []
115
  # return gr.update(interactive=False), gr.update(interactive=False, placeholder='Currently, only one input is supported'), gr.update(value="Currently, only one input is supported", interactive=False), chat_state, None,chatbot
116
 
 
117
  def gradio_ask(user_message, chatbot, chat_state):
118
  if len(user_message) == 0:
119
  return gr.update(interactive=True, placeholder='Input should not be empty!'), chatbot, chat_state
@@ -121,7 +130,7 @@ def gradio_ask(user_message, chatbot, chat_state):
121
  chatbot = chatbot + [[user_message, None]]
122
  return '', chatbot, chat_state
123
 
124
-
125
  def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature):
126
  llm_message = chat.answer(conv=chat_state,
127
  img_list=img_list,
 
6
  --model_type llama_v2 \
7
  --gpu-id 0
8
  """
9
+ import requests
10
  import argparse
11
  import os
12
  import random
13
+ import subprocess
14
+ import sys
15
+ import io
16
+ import spaces
17
+
18
+ subprocess.check_call([sys.executable, "-m", "spacy", "download", "en_core_web_sm"])
19
 
20
  import numpy as np
21
  import torch
 
42
  def parse_args():
43
  parser = argparse.ArgumentParser(description="Demo")
44
  parser.add_argument("--cfg-path", required=False, default='./configs/eval_configs/eval.yaml', help="path to configuration file.")
45
+ parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.")
46
  parser.add_argument("--model_type", type=str, default='llama_v2', help="The type of LLM")
47
  parser.add_argument(
48
  "--options",
 
88
  # Gradio Setting
89
  # ========================================
90
 
91
+ @spaces.GPU
92
  def gradio_reset(chat_state, img_list):
93
  if chat_state is not None:
94
  chat_state.messages = []
 
96
  img_list = []
97
  return None, gr.update(value=None, interactive=True), gr.update(interactive=False),gr.update(value="Upload & Start Chat", interactive=True), chat_state, img_list
98
 
99
+ @spaces.GPU
100
  def upload_imgorvideo(gr_video, text_input, chat_state, chatbot):
101
  # if args.model_type == 'vicuna':
102
  # chat_state = default_conversation.copy()
 
122
  # # img_list = []
123
  # return gr.update(interactive=False), gr.update(interactive=False, placeholder='Currently, only one input is supported'), gr.update(value="Currently, only one input is supported", interactive=False), chat_state, None,chatbot
124
 
125
+ @spaces.GPU
126
  def gradio_ask(user_message, chatbot, chat_state):
127
  if len(user_message) == 0:
128
  return gr.update(interactive=True, placeholder='Input should not be empty!'), chatbot, chat_state
 
130
  chatbot = chatbot + [[user_message, None]]
131
  return '', chatbot, chat_state
132
 
133
+ @spaces.GPU
134
  def gradio_answer(chatbot, chat_state, img_list, num_beams, temperature):
135
  llm_message = chat.answer(conv=chat_state,
136
  img_list=img_list,