openlamm committed
Commit 1c406fa · 1 Parent(s): 35e8183

fix list bug

Files changed (1)
  1. app.py +5 -3
app.py CHANGED

@@ -125,7 +125,7 @@ def predict(
         prompt_text += f' Human: {input}'
 
     response = model.generate({
-        'prompt': prompt_text,
+        'prompt': [prompt_text] if not isinstance(prompt_text, list) else prompt_text,
         'image_paths': [image_path] if image_path else [],
         # 'audio_paths': [audio_path] if audio_path else [],
         # 'video_paths': [video_path] if video_path else [],
@@ -135,6 +135,8 @@ def predict(
         'max_tgt_len': max_length,
         'modality_embeds': modality_cache
     })
+    if isinstance(response, list):
+        response = response[0]
     chatbot.append((parse_text(input), parse_text(response)))
     history.append((input, response))
     return chatbot, history, modality_cache
@@ -151,7 +153,7 @@ def reset_state():
 
 
 with gr.Blocks(scale=4) as demo:
-    gr.HTML("""<h1 align="center">PandaGPT</h1>""")
+    gr.HTML("""<h1 align="center">LAMM</h1>""")
 
     with gr.Row(scale=1):
         with gr.Column(scale=1):
@@ -220,4 +222,4 @@ with gr.Blocks(scale=4) as demo:
             modality_cache
         ], show_progress=True)
 
-demo.queue().launch(enable_queue=True)
+demo.queue().launch(share=True, enable_queue=True)
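The "list bug" fix normalizes both sides of the model.generate call: the prompt is wrapped into a single-element list before the call, and a list-typed response is unwrapped afterwards. A minimal sketch of that normalization pattern follows; the helper names (normalize_prompt, unwrap_response) are illustrative and not part of app.py.

# Sketch of the list-normalization pattern this commit applies around
# model.generate; helper names are hypothetical, not from the repository.

def normalize_prompt(prompt_text):
    # generate expects a batch (list) of prompts: wrap a bare string,
    # pass an existing list through unchanged.
    return prompt_text if isinstance(prompt_text, list) else [prompt_text]

def unwrap_response(response):
    # generate may return a batch (list) of responses: the chat UI shows
    # a single turn, so take the first element.
    return response[0] if isinstance(response, list) else response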