Update app.py

app.py CHANGED
@@ -58,19 +58,13 @@ def stream_chat(message, history: list, temperature: float, max_new_tokens: int)
     else:
         image = Image.open(history[0][0][0])
         for prompt, answer in history:
-
-            # conversation.extend([{"role": "user", "content":"<|image_1|>"},{"role": "assistant", "content": ""}])
-            # else:
-                conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
+            conversation.extend([{"role": "user", "content": prompt}, {"role": "assistant", "content": answer}])
     conversation.append({"role": "user", "content": message['text']})
     print(f"Conversation is -\n{conversation}")
 
-    # streamer = TextIteratorStreamer(tokenizer, **{"skip_special_tokens": True, "skip_prompt": True, 'clean_up_tokenization_spaces':False,})
-
     generate_kwargs = dict(
         image=image,
         msgs=conversation,
-        # streamer=streamer,
         max_new_tokens=max_new_tokens,
         temperature=temperature,
         sampling=True,
@@ -78,16 +72,7 @@ def stream_chat(message, history: list, temperature: float, max_new_tokens: int)
     )
     if temperature == 0:
         generate_kwargs["sampling"] = False
-
-    """
-    thread = Thread(target=model.chat, kwargs=generate_kwargs)
-    thread.start()
 
-    buffer = ""
-    for new_text in streamer:
-        buffer += new_text
-        yield buffer
-    """
     response = model.chat(**generate_kwargs)
     return response
 
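In effect, the commit deletes the commented-out streaming path (a TextIteratorStreamer driven by a worker Thread) and the commented `<|image_1|>` conversation stubs, dedents the history `conversation.extend` back into the loop, and keeps a single blocking `model.chat` call. Below is a minimal sketch of `stream_chat` after this change, reconstructed from the diff context only. The diff starts at the `else:` branch, so the first-turn branch, the `conversation = []` initialisation, and the `model` object are assumptions, not code shown in this commit:

from PIL import Image

# Assumption: `model` is loaded elsewhere in app.py (not shown in this diff),
# exposing a blocking .chat(image=..., msgs=..., ...) method via its remote code.

def stream_chat(message, history: list, temperature: float, max_new_tokens: int):
    conversation = []
    if not history:
        # Assumption: first turn — the diff begins at the `else:` branch,
        # so this branch is a guess at the undiffed lines above it.
        image = Image.open(message["files"][-1])
    else:
        # Reuse the image from the first turn and replay the chat history.
        image = Image.open(history[0][0][0])
        for prompt, answer in history:
            conversation.extend([{"role": "user", "content": prompt},
                                 {"role": "assistant", "content": answer}])
    conversation.append({"role": "user", "content": message["text"]})
    print(f"Conversation is -\n{conversation}")

    generate_kwargs = dict(
        image=image,
        msgs=conversation,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        sampling=True,
    )
    if temperature == 0:
        generate_kwargs["sampling"] = False  # temperature 0 -> greedy decoding

    # Blocking call: the full reply is returned at once instead of being
    # streamed token by token as in the removed Thread/streamer path.
    response = model.chat(**generate_kwargs)
    return response

Since the function now returns instead of yields, the UI renders the reply only once generation completes: simpler code (no thread handoff), at the cost of perceived latency.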
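For reference, the deleted (and already commented-out) block followed the standard transformers token-streaming recipe: run generation on a worker thread and iterate over a TextIteratorStreamer in the foreground. A generic sketch of that pattern, assuming an ordinary tokenizer/model pair with a .generate API (the Space's model.chat may not accept a streamer kwarg the same way, which is perhaps why the block never left its comments):

from threading import Thread
from transformers import TextIteratorStreamer

def stream_reply(model, tokenizer, inputs, max_new_tokens=512):
    # The streamer yields decoded text pieces as generate() produces tokens.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=max_new_tokens)

    # Run generation on a worker thread so the main thread can consume
    # the streamer as a generator and yield partial text to the UI.
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()

    buffer = ""
    for new_text in streamer:
        buffer += new_text
        yield buffer  # yield the growing reply after each new chunk
    thread.join()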