import gradio as gr
from utils.logging_util import logger
from models.cpp_qwen2 import bot
#
# def postprocess(self, y):
# if y is None:
# return []
# for i, (message, response) in enumerate(y):
# y[i] = (
# None if message is None else mdtex2html.convert((message)),
# None if response is None else mdtex2html.convert(response),
# )
# return y
#
# gr.Chatbot.postprocess = postprocess
def generate_query(chatbot, history):
    """Stream a model-generated user query and record it in the history.

    Yields ``(query, chatbot, history)`` tuples so Gradio can live-update
    the chat display while the query streams in. If the last turn is
    already a user message, only a warning is emitted.

    :param chatbot: Gradio chat display — list of (user, assistant) pairs
    :param history: OpenAI-style message list of {"role", "content"} dicts
    """
    if history and history[-1]["role"] == "user":
        # A user turn is already pending; an assistant response should come next.
        gr.Warning('You should generate assistant-response.')
        yield None, chatbot, history
    else:
        query = None  # stays None if the streamer yields nothing (was a NameError below)
        chatbot.append(None)  # placeholder row, replaced by the first streamed chunk
        streamer = bot.generate_query(history, stream=True)
        for query in streamer:
            chatbot[-1] = (query, None)
            yield query, chatbot, history
        history.append({"role": "user", "content": query})
        yield query, chatbot, history
def generate_response(chatbot, history, user_input=None):
    """Stream an assistant response for the latest user turn.

    auto-mode: ``user_input`` is None and the pending user query is already
    the last entry in ``history``.
    manual-mode: ``user_input`` is the text the user typed; it is appended
    to the history first.

    Yields ``(response, chatbot, history)`` tuples for streaming UI updates.

    :param chatbot: Gradio chat display — list of (user, assistant) pairs
    :param history: OpenAI-style message list of {"role", "content"} dicts
    :param user_input: optional manually-typed user message
    """
    if user_input and (not history or history[-1]["role"] != "user"):
        history.append({"role": "user", "content": user_input})
    if not history or history[-1]["role"] != "user":
        # Nothing to respond to yet (was an IndexError on empty history).
        gr.Warning('You should generate or type user-input first.')
        yield None, chatbot, history
    else:
        query = history[-1]["content"]
        response = None  # stays None if the streamer yields nothing (was a NameError below)
        streamer = bot.generate_response(history, stream=True)
        for response in streamer:
            chatbot[-1] = (query, response)
            yield response, chatbot, history
        history.append({"role": "assistant", "content": response})
        # Use the module logger instead of bare print(), matching generate().
        logger.info(f"chatbot is {chatbot}")
        logger.info(f"history is {history}")
        yield response, chatbot, history
def generate(chatbot, history):
    """Dispatch to query- or response-generation based on the last role.

    assistant/system last -> generate the next user query;
    user last -> generate the assistant response.

    :param chatbot: Gradio chat display — list of (user, assistant) pairs
    :param history: OpenAI-style message list of {"role", "content"} dicts
    """
    logger.info(f"chatbot: {chatbot}; history: {history}")
    if not history:
        # Nothing to dispatch on (was an IndexError on empty history).
        gr.Warning("bug")
        return
    role = history[-1]["role"]
    if role in ["assistant", "system"]:
        streamer = generate_query(chatbot, history)
    elif role == "user":
        streamer = generate_response(chatbot, history)
    else:
        # Unknown role: warn and bail out. Previously execution fell through
        # to `for out in streamer` with streamer still None -> TypeError.
        gr.Warning("bug")
        return
    yield from streamer
def regenerate():
    """Drop the previous round and generate it again.

    TODO: not implemented yet — currently a no-op placeholder.
    """
    return None
def reset_user_input():
    """Clear the user-input textbox by pushing an empty value to it."""
    return gr.update(value='')
def reset_state(system):
    """Start a fresh conversation.

    :param system: system-prompt text used to seed the new history
    :return: (empty chatbot display, history containing only the system turn)
    """
    fresh_history = [{"role": "system", "content": system}]
    return [], fresh_history
def set_max_tokens(max_tokens):
    """Store the new token limit in the shared generation config."""
    bot.generation_kwargs.update(max_tokens=max_tokens)
def set_top_p(top_p):
    """Store the new nucleus-sampling threshold in the shared generation config."""
    bot.generation_kwargs.update(top_p=top_p)
def set_temperature(temperature):
    """Store the new sampling temperature in the shared generation config."""
    bot.generation_kwargs.update(temperature=temperature)
def undo_generate(chatbot, history):
    """Roll back the most recent turn.

    - last turn is a user query: drop it and its chatbot row
    - last turn is an assistant reply: drop it from history, keep the
      query visible in the chatbot with an empty response slot
    - empty history or other role (e.g. system): no-op

    :param chatbot: Gradio chat display — list of (user, assistant) pairs
    :param history: OpenAI-style message list of {"role", "content"} dicts
    :return: ("", chatbot, history) — the empty string clears the input box
    """
    if not history:
        # Nothing to undo (was an IndexError on empty history).
        return "", chatbot, history
    last_role = history[-1]["role"]
    if last_role == "user":
        history = history[:-1]
        chatbot = chatbot[:-1]
    elif last_role == "assistant":
        history = history[:-1]
        chatbot[-1] = (chatbot[-1][0], None)
    return "", chatbot, history