sanbo committed · Commit 10be726 · Parent(s): 3391dd2
update sth. at 2024-11-15 18:53:47

Files changed: requirements.txt (+103 -4)
requirements.txt CHANGED
@@ -1,4 +1,103 @@
-gradio
-huggingface_hub
-
-
+import gradio as gr
+from huggingface_hub import InferenceClient
+from PIL import Image
+import googletrans  # used for Chinese-to-English translation
+
+# ===================== Core logic =====================
+
+# Initialize the translation client
+translator = googletrans.Translator()
+
+# Initialize the model clients
+try:
+    # Text chat model
+    client_text = InferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")
+
+    # Image generation model
+    client_image = InferenceClient("black-forest-labs/FLUX.1-dev")
+
+    # Visual question answering model (hidden for now)
+    client_vqa = None
+except Exception as e:
+    print(f"Error initializing clients: {e}")
+
+# ---------- Text chat module ----------
+def chat_with_model(messages):
+    """
+    Call the text chat model to generate a reply.
+    """
+    try:
+        response = client_text.chat_completion(messages, max_tokens=100)
+        return response.choices[0].message.content
+    except Exception as e:
+        print(f"Chat generation failed: {e}")
+        return "聊天生成失败,请稍后再试。"
+
+# ---------- Image generation module ----------
+def image_gen(prompt):
+    """
+    Call the image generation model; Chinese prompts are translated to English first.
+    """
+    try:
+        # If the prompt contains non-ASCII characters, assume it is Chinese and translate it to English
+        if any(ord(c) > 127 for c in prompt):
+            prompt = translator.translate(prompt, src='zh-cn', dest='en').text
+
+        # Report that the image is being generated
+        status_message = "图像生成中,请稍候..."
+
+        image = client_image.text_to_image(prompt)
+
+        # Image generation finished; update the status
+        status_message = "图像生成完成!"
+
+        return image, prompt, status_message  # return the image, the (possibly translated) prompt, and the status
+    except Exception as e:
+        print(f"Image generation failed: {e}")
+        return None, "图像生成失败,请稍后再试。", "图像生成失败,请稍后再试。"  # return error messages
+
+# ===================== Gradio UI =====================
+
+def build_interface():
+    """
+    Build the Gradio layout with the text chat and image generation tabs.
+    """
+    with gr.Blocks() as demo:
+        # Service status display area
+        status_output = gr.Textbox(label="服务状态", value="服务初始化中...", interactive=False)
+
+        # Text chat tab
+        with gr.Tab("文本聊天"):
+            chatbox_input = gr.Textbox(label="输入你的问题", placeholder="请提问...")
+            chatbox_output = gr.Textbox(label="回答")
+            chatbox_button = gr.Button("发送")
+
+            def chat_handler(user_input):
+                messages = [{"role": "user", "content": user_input}]
+                return chat_with_model(messages)
+
+            chatbox_button.click(chat_handler, inputs=chatbox_input, outputs=chatbox_output)
+
+        # Image generation tab
+        with gr.Tab("图像生成"):
+            image_prompt = gr.Textbox(label="图像提示词", placeholder="描述你想生成的图像")
+            image_output = gr.Image(label="生成的图像")
+            image_description = gr.Textbox(label="提示词", placeholder="输入中文或英文提示词", interactive=False)
+            image_button = gr.Button("生成图像")
+
+            # Handle image generation requests
+            def image_handler(prompt):
+                img, translated_prompt, status = image_gen(prompt)
+                return img, translated_prompt, status
+
+            image_button.click(image_handler, inputs=image_prompt, outputs=[image_output, image_description, status_output])
+
+        gr.Markdown("### 使用说明")
+        gr.Markdown("本助手支持文本聊天和图像生成功能,使用上方选项卡切换不同功能。")
+
+    return demo
+
+# Launch the Gradio interface
+if __name__ == "__main__":
+    demo = build_interface()
+    demo.launch()