qnguyen3 committed
Commit 69f9849
1 Parent(s): 1e610f0

Update app.py

Files changed (1)
  1. app.py +33 -33
app.py CHANGED
@@ -14,40 +14,40 @@ model.to("cuda:0")
 
 @spaces.GPU
 def bot_streaming(message, history):
-    print(history)
-    if message["files"]:
-        image = message["files"][-1]["path"]
-    else:
-        # if there's no image uploaded for this turn, look for images in the past turns
-        # kept inside tuples, take the last one
-        for hist in history:
-            if type(hist[0])==tuple:
-                image = hist[0][0]
-
-    if image is None:
-        gr.Error("You need to upload an image for LLaVA to work.")
-    prompt=f"[INST] <image>\n{message['text']} [/INST]"
-    image = Image.open(image).convert("RGB")
-    inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
-
-    streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True})
-    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=100)
-    generated_text = ""
-
-    thread = Thread(target=model.generate, kwargs=generation_kwargs)
-    thread.start()
-
-    text_prompt =f"[INST] \n{message['text']} [/INST]"
-
-
-    buffer = ""
-    for new_text in streamer:
-
-        buffer += new_text
+    chat_history = []
+    if message["files"]:
+        image = message["files"][-1]["path"]
+    else:
+        for hist in history:
+            if type(hist[0])==tuple:
+                image = hist[0][0]
+
+    if len(history) > 0 and image:
+        chat_history.append({"role": "user", "content": f"<image>\n{message['text']}"})
+        for human, assistant in history[1:]:
+            chat_history.append({"role": "user", "content": human})
+            chat_history.append({"role": "assistant", "content": assistant})
+
+    if image is None:
+        gr.Error("You need to upload an image for LLaVA to work.")
+    prompt=f"[INST] <image>\n{message['text']} [/INST]"
+    image = Image.open(image).convert("RGB")
+    inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
+    streamer = TextIteratorStreamer(processor, **{"skip_special_tokens": True})
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=100)
+    generated_text = ""
+    thread = Thread(target=model.generate, kwargs=generation_kwargs)
+    thread.start()
+    text_prompt =f"[INST] \n{message['text']} [/INST]"
 
-        generated_text_without_prompt = buffer[len(text_prompt):]
-        time.sleep(0.04)
-        yield generated_text_without_prompt
+    buffer = ""
+    for new_text in streamer:
+
+        buffer += new_text
+
+        generated_text_without_prompt = buffer[len(text_prompt):]
+        time.sleep(0.04)
+        yield generated_text_without_prompt
 
 
 demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA NeXT", examples=[{"text": "What is on the flower?", "files":["./bee.jpg"]},
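
Note on the streaming pattern both sides of this hunk share: model.generate runs on a background thread while a TextIteratorStreamer yields decoded text chunks that the Gradio handler re-yields to the chat UI. The minimal standalone sketch below isolates that pattern; the checkpoint id, the bee.jpg path, and the plain print loop are illustrative assumptions rather than part of the commit, since the actual model/processor setup lives in the part of app.py outside this hunk.

# Minimal sketch of the thread + TextIteratorStreamer pattern used in bot_streaming.
from threading import Thread

import torch
from PIL import Image
from transformers import LlavaNextForConditionalGeneration, LlavaNextProcessor, TextIteratorStreamer

model_id = "llava-hf/llava-v1.6-mistral-7b-hf"  # assumed checkpoint; app.py's real id is outside this hunk
processor = LlavaNextProcessor.from_pretrained(model_id)
model = LlavaNextForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16)
model.to("cuda:0")

prompt = "[INST] <image>\nWhat is on the flower? [/INST]"
image = Image.open("./bee.jpg").convert("RGB")
inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")

# generate() runs on a worker thread; the streamer yields decoded text as tokens arrive,
# which is what lets the Gradio handler stream partial answers to the chat window.
streamer = TextIteratorStreamer(processor, skip_special_tokens=True)
thread = Thread(target=model.generate, kwargs=dict(inputs, streamer=streamer, max_new_tokens=100))
thread.start()

for new_text in streamer:
    print(new_text, end="", flush=True)
thread.join()

In the handler itself, the echoed prompt is trimmed with buffer[len(text_prompt):] before each yield, which appears to be why text_prompt omits the <image> placeholder that skip_special_tokens strips from the decoded output.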