import gradio as gr
from huggingface_hub import InferenceClient

# Client for the fine-tuned SequoiaDB chat model hosted on the Hugging Face Hub.
client = InferenceClient(model="wangzhang/chatSDB-test")


def inference(message, history):
    # Instruction-style prompt expected by the model. The Chinese task line
    # translates to: "Answer questions about the SequoiaDB database."
    prompt = f"""### Instruction:
### Task:
根据巨杉数据库SequoiaDB的相关问题进行回答。
### Input:
{message}
### Response:
"""
    partial_message = ""
    # Stream tokens from the endpoint and yield the growing answer so the chat window updates incrementally.
    for token in client.text_generation(prompt=prompt, max_new_tokens=512, stream=True, best_of=1,
                                         temperature=0.1, top_p=0.99, do_sample=True, repetition_penalty=1.2):
        # Stop streaming as soon as the model emits its special "<s>" marker.
        if token.startswith("<s>"):
            return partial_message
        partial_message += token
        yield partial_message
# Chat UI. The Chinese UI strings translate roughly to:
#   placeholder  - "You can ask me anything about SequoiaDB!"
#   description  - "This is SequoiaDB's AI large language model, trained on more than
#                   ten thousand real data samples, with 700 million parameters."
#   examples     - "What is SequoiaDB?" / "Which database instance types does SequoiaDB support?"
#   buttons      - Retry / Undo / Clear / Ask
gr.ChatInterface(
    inference,
    chatbot=gr.Chatbot(height=300, scale=7),
    textbox=gr.Textbox(placeholder="你可以问我任何关于SequoiaDB的问题!", container=False, scale=7),
    description="这是SequoiaDB旗下的AI智能大语言模型,训练超过上万条真实数据和7亿参数。",
    title="ChatSDB",
    examples=["SequoiaDB巨杉数据库是什么?", "SequoiaDB巨杉数据库支持哪些类型的数据库实例?"],
    retry_btn="重试",
    undo_btn="撤销",
    clear_btn="清除",
    submit_btn="提问",
).queue().launch()
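
# For reference, a minimal sketch of exercising the streaming generator outside
# the Gradio UI (run it before the blocking launch() call above, or in a separate
# session that loads the same code). The example question comes from the examples
# list; it assumes the hosted inference endpoint for "wangzhang/chatSDB-test" is awake.
#
# answer = ""
# for answer in inference("SequoiaDB巨杉数据库是什么?", history=[]):
#     pass  # keep only the last, fully accumulated answer
# print(answer)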