Bobber Cheng committed
Commit 771ca71
1 Parent(s): 72869ac

fix indentation error

Files changed (1):
  1. app.py +7 -8
app.py CHANGED

@@ -71,10 +71,10 @@ def stream_chat(input_image: Image.Image, vlm_prompt):
     image = image.to('cuda')
 
     # Tokenize the prompt
-    if not vlm_prompt:
-    vlm_prompt = VLM_PROMPT
-    vlm_prompt = vlm_prompt + "\n"
-    prompt = tokenizer.encode(
+    if not vlm_prompt:
+        vlm_prompt = VLM_PROMPT
+    vlm_prompt = vlm_prompt + "\n"
+    prompt = tokenizer.encode(
         vlm_prompt,
         return_tensors='pt',
         padding=False,
@@ -130,9 +130,8 @@ with gr.Blocks() as demo:
 
     with gr.Column():
         output_caption = gr.Textbox(label="Caption")
-
-        with gr.Row():
-        vlm_prompt = gr.Text(
+        with gr.Row():
+            vlm_prompt = gr.Text(
                 label="VLM Prompt",
                 show_label=False,
                 max_lines=1,
@@ -140,7 +139,7 @@ with gr.Blocks() as demo:
                 container=False,
                 value="A descriptive caption for this image:",
             )
-
+
         run_button.click(fn=stream_chat, inputs=[input_image, vlm_prompt], outputs=[output_caption])
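Because the hunks only show the changed region of app.py, here is a minimal, self-contained sketch of how the corrected prompt-handling block in stream_chat plausibly reads after this commit. The encode_prompt wrapper, the gpt2 stand-in tokenizer, and the VLM_PROMPT value (borrowed from the gr.Text default shown above) are illustrative assumptions, not code from the repository.

from transformers import AutoTokenizer

VLM_PROMPT = "A descriptive caption for this image:"  # assumed default; mirrors the gr.Text value above
tokenizer = AutoTokenizer.from_pretrained("gpt2")     # stand-in checkpoint, not the app's actual model

def encode_prompt(vlm_prompt: str):
    # Fall back to the default prompt when the UI sends an empty string.
    # Only this assignment belongs inside the `if`; mis-indenting it is
    # the kind of mistake the "fix indentation error" message refers to.
    if not vlm_prompt:
        vlm_prompt = VLM_PROMPT
    # Append the trailing newline, then tokenize to a PyTorch tensor, as in
    # the diff's tokenizer.encode(...) call (whose remaining keyword
    # arguments, if any, are cut off by the hunk).
    vlm_prompt = vlm_prompt + "\n"
    prompt = tokenizer.encode(
        vlm_prompt,
        return_tensors='pt',
        padding=False,
    )
    return prompt

print(encode_prompt("").shape)  # [1, seq_len] token ids for the default prompt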