# OpenCHAT-mini2 / app.py
# Author: sanbo
# Last update: 2024-11-18 23:53:12 (commit 6dbf541)
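"""
Gradio app exposing three tools behind a tabbed UI:
  1. Text chat backed by meta-llama/Llama-3.2-11B-Vision-Instruct via the
     Hugging Face Inference API.
  2. A gpt-4o-mini chat proxied through an OpenAI-compatible endpoint
     (https://sanbo1200-duck2api.hf.space/completions).
  3. Text-to-image generation from two services shown side by side
     (the task's default model and black-forest-labs/FLUX.1-dev).
"""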
import gradio as gr
from huggingface_hub import InferenceClient
import requests
# ===================== Core logic =====================
# Initialize the inference clients.
try:
    # Text chat model
    client_text = InferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")
    # Image generation model 1 (no model specified: uses the task's default)
    client_image_1 = InferenceClient()
    # Image generation model 2 (FLUX)
    client_image_2 = InferenceClient("black-forest-labs/FLUX.1-dev")
    # Clients are ready: report that the service is up.
    service_status = "服务已启动,您可以开始使用!"
except Exception as e:
    print(f"Error initializing clients: {e}")
    service_status = "服务初始化失败,请稍后再试。"
# ---------- Text chat module ----------
def chat_with_model(messages):
    """
    Call the text chat model and return the generated reply.
    """
    try:
        response = client_text.chat_completion(messages, max_tokens=100)
        return response.choices[0].message.content
    except Exception as e:
        print(f"Chat generation failed: {e}")
        return "聊天生成失败,请稍后再试。"
# ---------- chatgpt-4o-mini module ----------
def chatgpt_4o_mini(system_prompt, query):
    """
    Call gpt-4o-mini through an OpenAI-compatible proxy endpoint.
    """
    url = 'https://sanbo1200-duck2api.hf.space/completions'
    headers = {'Content-Type': 'application/json'}
    data = {
        "model": "gpt-4o-mini",
        "messages": [
            {"role": "system", "content": system_prompt},  # user-defined system prompt
            {"role": "user", "content": query}
        ],
        "stream": False
    }
    # Send the HTTP request. Streaming is disabled in the payload, so the
    # response is read as a single JSON body.
    try:
        response = requests.post(url, json=data, headers=headers, timeout=60)
    except requests.RequestException as e:
        print(f"chatgpt_4o_mini request failed: {e}")
        return "请求失败"
    response.encoding = 'utf-8'
    if response.status_code != 200:
        return "请求失败"
    json_data = response.json()
    return json_data['choices'][0]['message']['content']
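# The proxy above is assumed to speak the OpenAI chat-completions wire format:
# the request carries {"model", "messages", "stream"} and the JSON reply is
# expected to contain choices[0].message.content, which is what this function
# returns. If the endpoint changes its schema, only this function needs updating.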
# ---------- Image generation module ----------
def image_gen(prompt):
    """
    Call both image generation models and return the two resulting images.
    A failure in one service does not prevent the other from returning an image.
    """
    # Service 1 (default model)
    try:
        print(f"Generating image from service 1 with prompt: {prompt}")
        image_1 = client_image_1.text_to_image(prompt)
    except Exception as e:
        print(f"Service 1 image generation failed: {e}")
        image_1 = None
    # Service 2 (FLUX model)
    try:
        print(f"Generating image from service 2 with prompt: {prompt}")
        image_2 = client_image_2.text_to_image(prompt)
    except Exception as e:
        print(f"Service 2 image generation failed: {e}")
        image_2 = None
    return image_1, image_2  # either value may be None if that service failed
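# text_to_image() returns a PIL image, which gr.Image can display directly,
# so image_gen's return values are wired straight into the two image outputs
# in the UI below.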
# ===================== Gradio UI =====================
def build_interface():
    """
    Build the Gradio layout: text chat, chatgpt-4o-mini, and image generation tabs.
    """
with gr.Blocks(css="""
@media (prefers-color-scheme: dark) {
.gradio-container {
background-color: #1e1e1e;
color: #ffffff;
font-family: Arial, sans-serif;
}
.tab-title {
color: #ffffff;
}
.gr-button {
background-color: #4A90E2;
color: white;
}
.gr-button:hover {
background-color: #357ABD;
}
}
@media (prefers-color-scheme: light) {
.gradio-container {
background-color: #ffffff;
color: #000000;
font-family: Arial, sans-serif;
}
.tab-title {
color: #000000;
}
.gr-button {
background-color: #4A90E2;
color: white;
}
.gr-button:hover {
background-color: #357ABD;
}
}
""") as demo:
gr.Markdown("<h1 style='text-align: center; color: #4A90E2;'>智能助手</h1>")
status_output = gr.Textbox(label="服务状态", value=service_status, interactive=False, elem_id="status_output")
        # Text chat tab
        with gr.Tab("Llama3.2-11B"):
            chatbox_input = gr.Textbox(label="输入你的问题", placeholder="请提问...", elem_id="chatbox_input")
            chatbox_output = gr.Markdown(label="回答", elem_id="chatbox_output")  # rendered as Markdown
            chatbox_button = gr.Button("发送", elem_id="chatbox_button")

            def chat_handler(user_input):
                # Wrap the raw input in the OpenAI-style message list expected by chat_with_model.
                messages = [{"role": "user", "content": user_input}]
                return chat_with_model(messages)

            chatbox_button.click(chat_handler, inputs=chatbox_input, outputs=chatbox_output)
        # chatgpt-4o-mini tab
        with gr.Tab("gpt4o"):
            system_prompt_input = gr.Textbox(label="自定义系统提示词", placeholder="输入系统提示词...", elem_id="system_prompt_input")
            chatgpt_input = gr.Textbox(label="输入你的问题", placeholder="请提问...", elem_id="chatgpt_input")
            chatgpt_output = gr.Markdown(label="回答", elem_id="chatgpt_output")  # rendered as Markdown
            chatgpt_button = gr.Button("发送", elem_id="chatgpt_button")

            def chatgpt_handler(system_prompt, user_input):
                return chatgpt_4o_mini(system_prompt, user_input)

            chatgpt_button.click(chatgpt_handler, inputs=[system_prompt_input, chatgpt_input], outputs=chatgpt_output)
        # Image generation tab
        with gr.Tab("图像生成"):
            image_prompt = gr.Textbox(label="图像提示词", placeholder="描述你想生成的图像", elem_id="image_prompt")
            with gr.Row():
                # Output-only images, so they are not interactive/uploadable.
                image_output_1 = gr.Image(label="服务一生成的图像", elem_id="image_1", interactive=False)
                image_output_2 = gr.Image(label="服务二生成的图像", elem_id="image_2", interactive=False)
            image_button = gr.Button("生成图像", elem_id="image_button")

            def image_handler(prompt):
                img_1, img_2 = image_gen(prompt)
                return img_1, img_2

            image_button.click(image_handler, inputs=image_prompt, outputs=[image_output_1, image_output_2])
gr.Markdown("<h3 style='text-align: center;'>使用说明</h3>")
gr.Markdown("<p style='text-align: center;'>本助手支持文本聊天、chatgpt-4o-mini 和图像生成功能,使用上方选项卡切换不同功能。</p>")
return demo
# Launch the Gradio app
if __name__ == "__main__":
demo = build_interface()
demo.launch()
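    # demo.launch() uses Gradio's defaults, which is what a Gradio Space expects.
    # When running elsewhere (e.g. locally or in Docker), options such as
    # demo.launch(server_name="0.0.0.0", server_port=7860) may be needed; these
    # values are illustrative and not part of the original configuration.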