Spaces:
Sleeping
Sleeping
File size: 6,724 Bytes
4a8ac8b 7abf81a cc873a6 f4bda8e c6414ce 7160aa7 941cba6 5be1557 941cba6 5be1557 a8b1053 941cba6 a8b1053 7e5261e 263464a 755ce4f f4bda8e a4633a8 f4bda8e a4633a8 f4bda8e a4633a8 755ce4f c6414ce 5be1557 c6414ce 941cba6 5be1557 e26becc 5be1557 e26becc 3391dd2 5be1557 e26becc 5be1557 e26becc 3391dd2 5be1557 941cba6 5be1557 c6414ce 755ce4f c6414ce 0dcbf45 f4bda8e 263464a 3012e75 f4bda8e 6dbf541 f4bda8e 263464a 755ce4f cc873a6 f4bda8e 6dbf541 f4bda8e 755ce4f f4bda8e 755ce4f f4bda8e 755ce4f 263464a c6414ce f4bda8e 9ddcb59 f4bda8e c6414ce 3391dd2 5be1557 941cba6 5be1557 941cba6 f4bda8e c6414ce |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 |
import gradio as gr
from huggingface_hub import InferenceClient
import requests
# ===================== Core logic module =====================
# Initialize the model clients once at import time. If any client fails to
# construct, the UI still loads but shows a failure status instead of crashing.
try:
    # Text chat model (Llama 3.2 11B Vision Instruct via HF Inference API)
    client_text = InferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")
    # Image generation model 1 (no model pinned — uses the API's default)
    client_image_1 = InferenceClient()
    # Image generation model 2 (FLUX.1-dev)
    client_image_2 = InferenceClient("black-forest-labs/FLUX.1-dev")
    # Mark the service as ready; displayed in the status textbox of the UI.
    service_status = "服务已启动,您可以开始使用!"
except Exception as e:
    print(f"Error initializing clients: {e}")
    # Failure message shown to the user in place of the ready status.
    service_status = "服务初始化失败,请稍后再试。"
# ---------- 文本聊天模块 ----------
def chat_with_model(messages):
    """Send *messages* to the text-chat model and return the reply text.

    Any failure — network error, API error, or an unexpected response
    shape — is caught and converted into a user-facing fallback message,
    so the UI never surfaces a raw traceback.
    """
    try:
        # Response parsing stays inside the try so a malformed payload
        # also falls back gracefully rather than raising.
        completion = client_text.chat_completion(messages, max_tokens=100)
        return completion["choices"][0]["message"]["content"]
    except Exception as exc:
        print(f"Chat generation failed: {exc}")
        return "聊天生成失败,请稍后再试。"
# ---------- chatgpt-4o-mini 模块 ----------
def chatgpt_4o_mini(system_prompt, Query):
    """Query the gpt-4o-mini relay endpoint with a custom system prompt.

    Parameters
    ----------
    system_prompt : str
        System prompt injected as the first message of the conversation.
    Query : str
        The user's question.

    Returns
    -------
    str
        The assistant's reply, or "请求失败" on any network/parse failure.
    """
    url = 'https://sanbo1200-duck2api.hf.space/completions'
    headers = {'Content-Type': 'application/json'}
    data = {
        "model": "gpt-4o-mini",
        "messages": [
            {"role": "system", "content": system_prompt},  # user-customized system prompt
            {"role": "user", "content": Query},
        ],
        "stream": False,
    }
    try:
        # Fix: the original passed stream=True while the payload requests a
        # non-streamed body and then called response.json(); a plain POST is
        # correct. A timeout is added so a stalled endpoint cannot hang the UI.
        response = requests.post(url, json=data, headers=headers, timeout=60)
        response.encoding = 'utf-8'
        if response.status_code != 200:
            return "请求失败"
        json_data = response.json()
        return json_data['choices'][0]['message']['content']
    except (requests.RequestException, KeyError, IndexError, ValueError) as e:
        # Fix: the original had no error handling — a connection error or an
        # unexpected response shape would raise straight into the UI handler.
        print(f"chatgpt_4o_mini request failed: {e}")
        return "请求失败"
# ---------- 图像生成模块 ----------
def image_gen(prompt):
    """Generate one image from each of the two image services.

    Fix: the original wrapped both backends in a single try, so an
    exception from service 1 discarded service 2's result as well. Each
    service is now attempted independently.

    Parameters
    ----------
    prompt : str
        Text description of the desired image.

    Returns
    -------
    tuple
        (image_1, image_2); a slot is None when that service raised or
        produced no image.
    """
    def _try_service(client, label):
        # One guarded call per backend so a failure stays isolated.
        try:
            print(f"Generating image from {label} with prompt: {prompt}")
            image = client.text_to_image(prompt)
            if image is None:
                print(f"{label} returned no image.")
            return image
        except Exception as e:
            print(f"Image generation failed on {label}: {e}")
            return None

    image_1 = _try_service(client_image_1, "service 1")
    image_2 = _try_service(client_image_2, "service 2")
    return image_1, image_2
# ===================== Gradio 界面构建 =====================
def build_interface():
    """
    Build the Gradio UI layout: a service-status box plus three tabs —
    Llama text chat, chatgpt-4o-mini chat, and dual-service image
    generation.

    Returns
    -------
    gr.Blocks
        The assembled app; the caller is responsible for launching it.
    """
    # CSS adapts background/text/button colors to the OS light/dark preference.
    with gr.Blocks(css="""
        @media (prefers-color-scheme: dark) {
            .gradio-container {
                background-color: #1e1e1e;
                color: #ffffff;
                font-family: Arial, sans-serif;
            }
            .tab-title {
                color: #ffffff;
            }
            .gr-button {
                background-color: #4A90E2;
                color: white;
            }
            .gr-button:hover {
                background-color: #357ABD;
            }
        }
        @media (prefers-color-scheme: light) {
            .gradio-container {
                background-color: #ffffff;
                color: #000000;
                font-family: Arial, sans-serif;
            }
            .tab-title {
                color: #000000;
            }
            .gr-button {
                background-color: #4A90E2;
                color: white;
            }
            .gr-button:hover {
                background-color: #357ABD;
            }
        }
    """) as demo:
        gr.Markdown("<h1 style='text-align: center; color: #4A90E2;'>智能助手</h1>")
        # Read-only status box showing the module-level init result.
        status_output = gr.Textbox(label="服务状态", value=service_status, interactive=False, elem_id="status_output")
        # --- Text chat tab (Llama 3.2 11B) ---
        with gr.Tab("Llama3.2-11B"):
            chatbox_input = gr.Textbox(label="输入你的问题", placeholder="请提问...", elem_id="chatbox_input")
            chatbox_output = gr.Markdown(label="回答", elem_id="chatbox_output")  # Markdown so replies render formatting
            chatbox_button = gr.Button("发送", elem_id="chatbox_button")
            def chat_handler(user_input):
                # Wrap the raw input as a single-turn user message.
                messages = [{"role": "user", "content": user_input}]
                return chat_with_model(messages)
            chatbox_button.click(chat_handler, inputs=chatbox_input, outputs=chatbox_output)
        # --- chatgpt-4o-mini tab ---
        with gr.Tab("gpt4o"):
            system_prompt_input = gr.Textbox(label="自定义系统提示词", placeholder="输入系统提示词...", elem_id="system_prompt_input")
            chatgpt_input = gr.Textbox(label="输入你的问题", placeholder="请提问...", elem_id="chatgpt_input")
            chatgpt_output = gr.Markdown(label="回答", elem_id="chatgpt_output")  # Markdown so replies render formatting
            chatgpt_button = gr.Button("发送", elem_id="chatgpt_button")
            def chatgpt_handler(system_prompt, user_input):
                return chatgpt_4o_mini(system_prompt, user_input)
            chatgpt_button.click(chatgpt_handler, inputs=[system_prompt_input, chatgpt_input], outputs=chatgpt_output)
        # --- Image generation tab (two services side by side) ---
        with gr.Tab("图像生成"):
            image_prompt = gr.Textbox(label="图像提示词", placeholder="描述你想生成的图像", elem_id="image_prompt")
            with gr.Row():
                image_output_1 = gr.Image(label="服务一生成的图像", elem_id="image_1", interactive=True)
                image_output_2 = gr.Image(label="服务二生成的图像", elem_id="image_2", interactive=True)
            image_button = gr.Button("生成图像", elem_id="image_button")
            def image_handler(prompt):
                # image_gen returns (img_1, img_2); either may be None on failure.
                img_1, img_2 = image_gen(prompt)
                return img_1, img_2
            image_button.click(image_handler, inputs=image_prompt, outputs=[image_output_1, image_output_2])
        gr.Markdown("<h3 style='text-align: center;'>使用说明</h3>")
        gr.Markdown("<p style='text-align: center;'>本助手支持文本聊天、chatgpt-4o-mini 和图像生成功能,使用上方选项卡切换不同功能。</p>")
    return demo
# 启动 Gradio 界面
# Entry point: construct the UI and serve it.
if __name__ == "__main__":
    app = build_interface()
    app.launch()
|