# OpenCHAT-mini2 / app.py_v2
# Author: sanbo
# Updated: 2024-11-18 23:42:58 (commit f4bda8e)
import gradio as gr
import subprocess
from huggingface_hub import InferenceClient
from PIL import Image
import requests
import json
# ===================== Core logic =====================
# Initialize the Hugging Face inference clients once at import time.
try:
    # Text chat model.
    client_text = InferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")
    # Image generation model 1 (service default).
    client_image_1 = InferenceClient()
    # Image generation model 2 (FLUX).
    client_image_2 = InferenceClient("black-forest-labs/FLUX.1-dev")
    # Advertise readiness to the UI status box.
    service_status = "服务已启动,您可以开始使用!"
except Exception as e:
    print(f"Error initializing clients: {e}")
    service_status = "服务初始化失败,请稍后再试。"
# ---------- 文本聊天模块 ----------
def chat_with_model(messages):
    """Send a chat history to the text model and return the reply text.

    Parameters
    ----------
    messages : list[dict]
        OpenAI-style ``{"role": ..., "content": ...}`` message list.

    Returns
    -------
    str
        The assistant's reply, or a Chinese failure notice when the
        backend call raises.
    """
    try:
        completion = client_text.chat_completion(messages, max_tokens=100)
        return completion["choices"][0]["message"]["content"]
    except Exception as err:
        print(f"Chat generation failed: {err}")
        return "聊天生成失败,请稍后再试。"
# ---------- chatgpt-4o-mini 模块 ----------
def chatgpt_4o_mini(Query):
    """Ask the remote gpt-4o-mini relay one question and return its answer.

    Parameters
    ----------
    Query : str
        The user's question, sent as the ``user`` message.

    Returns
    -------
    str
        The model's reply, or "请求失败" on any request/response failure.
    """
    url = 'https://sanbo1200-duck2api.hf.space/completions'
    headers = {'Content-Type': 'application/json'}
    data = {
        "model": "gpt-4o-mini",
        "messages": [
            {"role": "system", "content": "你是一个辅助机器人"},
            {"role": "user", "content": Query}
        ],
        "stream": False
    }
    try:
        # The payload requests a non-streaming reply, so do not pass
        # stream=True to requests (the original did, inconsistently, then
        # read the whole body via .json() anyway). A timeout keeps the
        # Gradio handler from hanging forever on a dead endpoint.
        response = requests.post(url, json=data, headers=headers, timeout=60)
        response.encoding = 'utf-8'
        if response.status_code != 200:
            return "请求失败"
        json_data = response.json()
        return json_data['choices'][0]['message']['content']
    except (requests.RequestException, ValueError, KeyError, IndexError) as e:
        # Network errors, invalid JSON, and unexpected response shapes all
        # surface as the same user-facing failure message instead of
        # crashing the UI handler.
        print(f"chatgpt_4o_mini request failed: {e}")
        return "请求失败"
# ---------- 图像生成模块 ----------
def image_gen(prompt):
    """Generate one image from each of the two image services.

    Parameters
    ----------
    prompt : str
        Text description of the desired image.

    Returns
    -------
    tuple
        ``(image_1, image_2)`` from service 1 and service 2 respectively,
        or ``(None, None)`` when generation raises.
    """
    try:
        # Service 1: the client's default model.
        print(f"Generating image from service 1 with prompt: {prompt}")
        first = client_image_1.text_to_image(prompt)
        if first is None:
            print("Service 1 returned no image.")

        # Service 2: the FLUX model.
        print(f"Generating image from service 2 with prompt: {prompt}")
        second = client_image_2.text_to_image(prompt)
        if second is None:
            print("Service 2 returned no image.")

        return first, second
    except Exception as err:
        print(f"Image generation failed: {err}")
        return None, None
# ===================== Gradio 界面构建 =====================
def build_interface():
    """Assemble the Gradio Blocks UI: text chat, gpt-4o-mini, and image tabs.

    Returns
    -------
    gr.Blocks
        The fully wired demo, ready to ``launch()``.
    """
    with gr.Blocks() as demo:
        # Service status banner (read-only).
        status_output = gr.Textbox(label="服务状态", value=service_status, interactive=False)

        # --- Text chat tab (Llama 3.2) ---
        with gr.Tab("Llama3.2-11B"):
            chatbox_input = gr.Textbox(label="输入你的问题", placeholder="请提问...")
            chatbox_output = gr.Textbox(label="回答")
            chatbox_button = gr.Button("发送")

            def chat_handler(user_input):
                # Wrap the single question as a one-turn message list.
                return chat_with_model([{"role": "user", "content": user_input}])

            chatbox_button.click(chat_handler, inputs=chatbox_input, outputs=chatbox_output)

        # --- gpt-4o-mini tab ---
        with gr.Tab("gpt4o"):
            chatgpt_input = gr.Textbox(label="输入你的问题", placeholder="请提问...")
            chatgpt_output = gr.Textbox(label="回答")
            chatgpt_button = gr.Button("发送")
            # The relay takes the raw question string directly, so no
            # wrapper function is needed.
            chatgpt_button.click(chatgpt_4o_mini, inputs=chatgpt_input, outputs=chatgpt_output)

        # --- Image generation tab ---
        with gr.Tab("图像生成"):
            image_prompt = gr.Textbox(label="图像提示词", placeholder="描述你想生成的图像")
            # Two images side by side, one per backend service.
            with gr.Row():
                image_output_1 = gr.Image(label="服务一生成的图像", elem_id="image_1", interactive=True)
                image_output_2 = gr.Image(label="服务二生成的图像", elem_id="image_2", interactive=True)
            image_button = gr.Button("生成图像")
            # image_gen already returns the (img_1, img_2) pair expected
            # by the two outputs.
            image_button.click(image_gen, inputs=image_prompt, outputs=[image_output_1, image_output_2])

        gr.Markdown("### 使用说明")
        gr.Markdown("本助手支持文本聊天、chatgpt-4o-mini 和图像生成功能,使用上方选项卡切换不同功能。")
    return demo
# Launch the Gradio interface when executed as a script.
if __name__ == "__main__":
    build_interface().launch()