Spaces:
BAAI
/
Running on L40S

ryanzhangfan committed on
Commit
9f2b36a
1 Parent(s): 8a3eb3c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -10
app.py CHANGED
@@ -24,7 +24,7 @@ import base64
24
 
25
  def image2str(image):
26
  buf = io.BytesIO()
27
- image.save(buf, format="WEBP")
28
  i_str = base64.b64encode(buf.getvalue()).decode()
29
  return f'<div style="float:left"><img src="data:image/png;base64, {i_str}"></div>'
30
 
@@ -46,7 +46,6 @@ VQ_HUB = "BAAI/Emu3-VisionTokenizer"
46
 
47
  # Prepare models and processors
48
  # Emu3-Gen model and processor
49
- """
50
  gen_model = AutoModelForCausalLM.from_pretrained(
51
  EMU_GEN_HUB,
52
  device_map="cpu",
@@ -54,7 +53,6 @@ gen_model = AutoModelForCausalLM.from_pretrained(
54
  attn_implementation="flash_attention_2",
55
  trust_remote_code=True,
56
  )
57
- """
58
 
59
  # Emu3-Chat model and processor
60
  chat_model = AutoModelForCausalLM.from_pretrained(
@@ -77,11 +75,10 @@ processor = Emu3Processor(
77
  )
78
 
79
  print(device)
80
- # gen_model.to(device)
81
  chat_model.to(device)
82
  image_tokenizer.to(device)
83
 
84
- """
85
  @spaces.GPU(duration=120)
86
  def generate_image(prompt):
87
  POSITIVE_PROMPT = " masterpiece, film grained, best quality."
@@ -141,7 +138,6 @@ def generate_image(prompt):
141
  if isinstance(im, Image.Image):
142
  return im
143
  return None
144
- """
145
 
146
  @spaces.GPU
147
  def vision_language_understanding(image, text):
@@ -180,19 +176,19 @@ def chat(history, user_input, user_image):
180
  # Append the user input and response to the history
181
  history = history + [(image2str(user_image) + "<br>" + user_input, response)]
182
  else:
183
- history = history + [(user_input, "Currently do not support image genration, please provide an valid image.")]
184
- """
185
  # Use Emu3-Gen for image generation
186
  generated_image = generate_image(user_input)
187
  if generated_image is not None:
188
  # Append the user input and generated image to the history
189
- history = history + [(user_input, generated_image)]
190
  else:
191
  # If image generation failed, respond with an error message
192
  history = history + [
193
  (user_input, "Sorry, I could not generate an image.")
194
  ]
195
- """
196
  return history, history, gr.update(value=None)
197
 
198
  def clear_input():
 
24
 
25
  def image2str(image):
26
  buf = io.BytesIO()
27
+ image.save(buf, format="PNG")
28
  i_str = base64.b64encode(buf.getvalue()).decode()
29
  return f'<div style="float:left"><img src="data:image/png;base64, {i_str}"></div>'
30
 
 
46
 
47
  # Prepare models and processors
48
  # Emu3-Gen model and processor
 
49
  gen_model = AutoModelForCausalLM.from_pretrained(
50
  EMU_GEN_HUB,
51
  device_map="cpu",
 
53
  attn_implementation="flash_attention_2",
54
  trust_remote_code=True,
55
  )
 
56
 
57
  # Emu3-Chat model and processor
58
  chat_model = AutoModelForCausalLM.from_pretrained(
 
75
  )
76
 
77
  print(device)
78
+ gen_model.to(device)
79
  chat_model.to(device)
80
  image_tokenizer.to(device)
81
 
 
82
  @spaces.GPU(duration=120)
83
  def generate_image(prompt):
84
  POSITIVE_PROMPT = " masterpiece, film grained, best quality."
 
138
  if isinstance(im, Image.Image):
139
  return im
140
  return None
 
141
 
142
  @spaces.GPU
143
  def vision_language_understanding(image, text):
 
176
  # Append the user input and response to the history
177
  history = history + [(image2str(user_image) + "<br>" + user_input, response)]
178
  else:
179
+ # history = history + [(user_input, "Currently do not support image genration, please provide an valid image.")]
180
+ # """
181
  # Use Emu3-Gen for image generation
182
  generated_image = generate_image(user_input)
183
  if generated_image is not None:
184
  # Append the user input and generated image to the history
185
+ history = history + [(user_input, image2str(generated_image))]
186
  else:
187
  # If image generation failed, respond with an error message
188
  history = history + [
189
  (user_input, "Sorry, I could not generate an image.")
190
  ]
191
+ # """
192
  return history, history, gr.update(value=None)
193
 
194
  def clear_input():