chenglu committed on
Commit ea5422c · 1 Parent(s): a6287f2
Files changed (1)
  1. app.py +324 -288
app.py CHANGED
@@ -1,320 +1,356 @@
- import subprocess
- import sys
  import os
- import gradio as gr
-
- def install_package(package):
-     """Install a Python package."""
-     try:
-         subprocess.check_call([sys.executable, "-m", "pip", "install", package])
-         return True
-     except subprocess.CalledProcessError:
-         return False
-
- def check_and_install_dependencies():
-     """Check for dependencies and install any that are missing."""
-     print("🔍 Checking dependencies...")
-
-     # Check for transformers
      try:
-         import transformers
-         print("✅ transformers is already installed")
-         return True
-     except ImportError:
-         print(" transformers not installed, attempting installation...")
-
-     # Try to install transformers
-     packages_to_install = [
-         "transformers==4.35.2",
-         "accelerate==0.24.1",
-         "bitsandbytes==0.41.3"
-     ]
-
-     for package in packages_to_install:
-         print(f"📦 Installing {package}...")
-         if install_package(package):
-             print(f"✅ {package} installed successfully")
-         else:
-             print(f"❌ {package} installation failed")
-
-     # Check again
      try:
-         import transformers
-         print("✅ transformers is now available")
          return True
-     except ImportError:
-         print("❌ transformers is still unavailable after installation")
-         return False
-
- # Check and install dependencies
- dependencies_ok = check_and_install_dependencies()
-
- if dependencies_ok:
-     # Dependencies are OK, import the required libraries
-     try:
-         import torch
-         from transformers import AutoTokenizer, AutoModel, AutoProcessor, Blip2ForConditionalGeneration
-         from PIL import Image
-         print("✅ All libraries imported successfully")
-
-         # The full application code follows
-         # HF Spaces environment detection
-         IS_SPACES = os.environ.get("SPACE_ID") is not None
-         print(f"Running on HF Spaces: {IS_SPACES}")
-
-         # Device configuration
-         device = "cuda" if torch.cuda.is_available() else "cpu"
-         print(f"Using device: {device}")
-
-         # Global variables
-         tokenizer = None
-         model = None
-         processor = None
-         blip_model = None
-
-         def load_models():
-             """Load the models."""
-             global tokenizer, model, processor, blip_model
-
              try:
-                 print("🔄 Loading models...")
-
-                 # Load the image-understanding model
-                 vision_model = "Salesforce/blip2-opt-2.7b"
-                 print(f"📷 Loading vision model: {vision_model}")
-                 processor = AutoProcessor.from_pretrained(vision_model)
-
-                 blip_model = Blip2ForConditionalGeneration.from_pretrained(
-                     vision_model,
-                     torch_dtype=torch.float16 if device == "cuda" else torch.float32,
-                     device_map="auto" if device == "cuda" else None,
-                     load_in_8bit=device == "cuda"
-                 )
-
-                 if device == "cpu":
-                     blip_model = blip_model.to("cpu")
-
-                 print("✅ Vision model loaded")
-
-                 # Load the chat model
-                 model_name = "THUDM/chatglm2-6b-int4"
-                 print(f"💬 Loading chat model: {model_name}")
-
-                 tokenizer = AutoTokenizer.from_pretrained(
-                     model_name,
-                     trust_remote_code=True
-                 )
-
-                 model = AutoModel.from_pretrained(
-                     model_name,
-                     trust_remote_code=True,
-                     torch_dtype=torch.float16 if device == "cuda" else torch.float32,
-                     low_cpu_mem_usage=True
-                 )
-
                  if device == "cuda":
-                     model = model.half().cuda()
                  model.eval()

-                 print("✅ Chat model loaded")
                  return True

-             except Exception as e:
-                 print(f"❌ Model loading failed: {str(e)}")
-                 return False
-
-         def describe_image(image):
-             """Generate an image caption."""
-             if blip_model is None or processor is None:
-                 return "Vision model not loaded"
-
-             try:
-                 if not isinstance(image, Image.Image):
-                     image = Image.fromarray(image)
-
-                 if image.size[0] > 512 or image.size[1] > 512:
-                     image.thumbnail((512, 512), Image.Resampling.LANCZOS)
-
-                 inputs = processor(image, return_tensors="pt")
-
-                 if device == "cuda":
-                     inputs = {k: v.to(device) for k, v in inputs.items()}
-
-                 with torch.no_grad():
-                     generated_ids = blip_model.generate(
-                         **inputs,
-                         max_new_tokens=30,
-                         num_beams=2,
-                         do_sample=False
-                     )
-
-                 caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
-                 return caption
-
-             except Exception as e:
-                 print(f"Image captioning error: {str(e)}")
-                 return "Image caption generation failed"
-
-         def on_image_upload(image):
-             """Handle an image upload."""
-             if image is None:
-                 return [], []
-
-             try:
-                 print("🖼️ Processing the uploaded image...")
-                 history = []
-                 chat_history = []
-
-                 caption = describe_image(image)
-                 print(f"Image caption: {caption}")
-
-                 prompt = f"This is a work of art, described as: {caption}. Please introduce and analyze this artwork in Chinese."
-
-                 if model is not None and tokenizer is not None:
-                     try:
-                         with torch.no_grad():
-                             response, history = model.chat(tokenizer, prompt, history=history)
-                         chat_history.append([image, response])
-                         print("✅ Initial analysis complete")
-                     except Exception as e:
-                         print(f"Chat generation error: {str(e)}")
-                         chat_history.append([image, "Sorry, an error occurred during analysis. Please try again."])
-                 else:
-                     chat_history.append([image, "The chat model did not load correctly; please refresh the page and retry."])
-
-                 return chat_history, history
-
-             except Exception as e:
-                 print(f"Image processing error: {str(e)}")
-                 return [[None, "Image processing failed, please upload it again."]], []
-
-         def on_user_message(user_message, chat_history, history):
-             """Handle a user message."""
-             if not user_message or not user_message.strip():
-                 yield chat_history or [], history or []
-                 return
-
-             if model is None or tokenizer is None:
-                 chat_history = chat_history or []
-                 chat_history.append([user_message, "Chat model not loaded; please refresh the page."])
-                 yield chat_history, history or []
-                 return
-
-             try:
-                 chat_history = chat_history or []
-                 history = history or []
-                 chat_history.append([user_message, ""])
-
-                 for output, new_history in model.stream_chat(
-                     tokenizer,
-                     user_message,
-                     history,
-                     max_length=2048,
-                     temperature=0.7,
-                     top_p=0.8
-                 ):
-                     chat_history[-1][1] = output
-                     yield chat_history, new_history
-
-             except Exception as e:
-                 print(f"Chat error: {str(e)}")
-                 if chat_history:
-                     chat_history[-1][1] = "Reply generation failed, please try again."
-                 yield chat_history, history or []
-
-         def clear_chat():
-             """Clear the conversation."""
-             return [], []
-
-         # Build the interface
-         with gr.Blocks(title="AI Artwork Guide") as demo:
-             gr.HTML("""
-                 <div style="text-align: center; margin-bottom: 20px;">
-                     <h1>🎨 AI Artwork Guide</h1>
-                     <p>Upload an artwork image to get a professional art analysis and interpretation</p>
-                 </div>
-             """)
-
-             with gr.Row():
-                 with gr.Column(scale=1):
-                     image_input = gr.Image(
-                         label="📤 Upload an artwork image",
-                         type="pil",
-                         height=350
-                     )
-                     clear_btn = gr.Button("🗑️ Clear conversation", variant="secondary")
-
-                 with gr.Column(scale=2):
-                     chatbot = gr.Chatbot(
-                         label="🤖 AI Analyst",
-                         height=500,
-                         show_label=True
-                     )
-
-                     user_input = gr.Textbox(
-                         label="💬 Ask a follow-up",
-                         placeholder="e.g. What painting techniques does this work use? What is its creative background?",
-                         lines=2
-                     )
-
-             # State management
-             state = gr.State([])
-
-             # Event bindings
-             image_input.upload(
-                 fn=on_image_upload,
-                 inputs=image_input,
-                 outputs=[chatbot, state],
-                 show_progress=True
-             )
-
-             user_input.submit(
-                 fn=on_user_message,
-                 inputs=[user_input, chatbot, state],
-                 outputs=[chatbot, state],
-                 show_progress=True
-             )
-
-             user_input.submit(lambda: "", inputs=[], outputs=[user_input])
-             clear_btn.click(fn=clear_chat, inputs=[], outputs=[chatbot, state])
-
-         # Launch the app
-         print("🚀 Starting the app...")
-         if load_models():
-             print("✅ Startup successful")
-             demo.queue(max_size=20).launch()
          else:
-             print("❌ Model loading failed, launching the simplified version")
-             with gr.Blocks() as simple_demo:
-                 gr.HTML("<h2>Model loading failed</h2><p>Please wait for dependency installation to finish, then retry</p>")
-             simple_demo.launch()

      except Exception as e:
-         print(f" Import failed: {str(e)}")
-         # Build an error page
-         with gr.Blocks() as error_demo:
-             gr.HTML(f"""
-                 <div style="text-align: center; padding: 50px;">
-                     <h2>❌ Library import failed</h2>
-                     <p>Error: {str(e)}</p>
-                     <p>Attempting an automatic fix...</p>
-                 </div>
-             """)
-         error_demo.launch()

- else:
-     # Dependency installation failed, show an error page
-     with gr.Blocks() as error_demo:
          gr.HTML("""
-             <div style="text-align: center; padding: 50px;">
-                 <h2>❌ Dependency installation failed</h2>
-                 <p>The transformers library could not be installed</p>
-                 <p>Please try the following fixes:</p>
-                 <ol style="text-align: left; display: inline-block;">
-                     <li>Check that a requirements.txt file exists</li>
-                     <li>Run a Factory reboot from Settings</li>
-                     <li>Wait for HF Spaces to rebuild the environment</li>
-                 </ol>
              </div>
          """)
-     error_demo.launch()
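
For reference, the removed bootstrap above boils down to an install-then-import helper. A minimal standalone sketch of that pattern follows; the ensure_package name and the importlib.invalidate_caches() call are illustrative additions, not part of this commit:

import importlib
import subprocess
import sys

def ensure_package(spec, module_name):
    """Import module_name, pip-installing spec first if the import fails."""
    try:
        importlib.import_module(module_name)
        return True
    except ImportError:
        pass
    try:
        # Install into the interpreter currently running this script
        subprocess.check_call([sys.executable, "-m", "pip", "install", spec])
    except subprocess.CalledProcessError:
        return False
    importlib.invalidate_caches()  # make the fresh install visible to this interpreter
    try:
        importlib.import_module(module_name)
        return True
    except ImportError:
        return False

# e.g. ensure_package("transformers==4.35.2", "transformers")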
  import os
+ import sys
+ import warnings
+ warnings.filterwarnings("ignore")
+
+ # Basic import checks
+ try:
+     import gradio as gr
+     from PIL import Image
+     import torch
+     print(f"✅ PyTorch version: {torch.__version__}")
+
+     # Check the PyTorch version
+     torch_version = torch.__version__
+     major, minor = map(int, torch_version.split('.')[:2])
+     if major < 2 or (major == 2 and minor < 6):
+         print(f"⚠️ PyTorch version {torch_version} may have known security issues")
+
+ except ImportError as e:
+     print(f"❌ Core library import failed: {e}")
+     with gr.Blocks() as error_demo:
+         gr.HTML("<h2>❌ Base environment error</h2>")
+     error_demo.launch()
+     sys.exit(1)
+
+ # Try to import transformers
+ try:
+     from transformers import AutoTokenizer, AutoModel, AutoProcessor, Blip2ForConditionalGeneration
+     print("✅ Transformers imported successfully")
+ except ImportError as e:
+     print(f"❌ Transformers import failed: {e}")
+     with gr.Blocks() as error_demo:
+         gr.HTML(f"<h2>❌ Transformers is not installed</h2><p>{str(e)}</p>")
+     error_demo.launch()
+     sys.exit(1)
+
+ # HF Spaces environment detection
+ IS_SPACES = os.environ.get("SPACE_ID") is not None
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ print(f"Environment: {'HF Spaces' if IS_SPACES else 'Local'}, device: {device}")
+
+ # Global variables
+ tokenizer = None
+ model = None
+ processor = None
+ blip_model = None
+
+ def load_models():
+     """Load the models - improved version."""
+     global tokenizer, model, processor, blip_model

      try:
+         print("🔄 Starting to load models...")
+
+         # 1. Load the vision model first (usually the more stable one)
+         vision_model = "Salesforce/blip2-opt-2.7b"
+         print(f"📷 Loading vision model: {vision_model}")
+
+         processor = AutoProcessor.from_pretrained(vision_model)
+         blip_model = Blip2ForConditionalGeneration.from_pretrained(
+             vision_model,
+             torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+             device_map="auto" if device == "cuda" else None,
+             load_in_8bit=device == "cuda",
+             trust_remote_code=True
+         )
+
+         if device == "cpu":
+             blip_model = blip_model.to("cpu")
+
+         print("✅ Vision model loaded")
+
+         # 2. Load the chat model with a more compatible configuration
+         model_name = "THUDM/chatglm2-6b-int4"
+         print(f"💬 Loading chat model: {model_name}")
+
          try:
+             tokenizer = AutoTokenizer.from_pretrained(
+                 model_name,
+                 trust_remote_code=True,
+                 use_fast=False  # the slow tokenizer can be more stable
+             )
+
+             model = AutoModel.from_pretrained(
+                 model_name,
+                 trust_remote_code=True,
+                 torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+                 low_cpu_mem_usage=True,
+                 device_map="auto" if device == "cuda" else None
+             )
+
+             if device == "cuda":
+                 model = model.half().cuda()
+             model.eval()
+
+             print("✅ Chat model loaded")
              return True
+
+         except Exception as chat_error:
+             print(f"⚠️ ChatGLM failed to load: {str(chat_error)}")
+             print("🔄 Trying a fallback chat model...")
+
+             # Fallback: use a simpler chat model
              try:
+                 backup_model = "microsoft/DialoGPT-medium"
+                 tokenizer = AutoTokenizer.from_pretrained(backup_model)
+                 model = AutoModel.from_pretrained(backup_model)

                  if device == "cuda":
+                     model = model.cuda()
                  model.eval()

+                 print("✅ Fallback chat model loaded")
                  return True

+             except Exception as backup_error:
+                 print(f"❌ The fallback model failed to load as well: {str(backup_error)}")
+                 # At least the vision model is usable
+                 return "partial"
+
+     except Exception as e:
+         print(f"❌ Model loading failed entirely: {str(e)}")
+         return False
+
+ def describe_image(image):
+     """Image captioning."""
+     if blip_model is None or processor is None:
+         return "Vision model not loaded"
+
+     try:
+         if not isinstance(image, Image.Image):
+             image = Image.fromarray(image)
+
+         # Resize the image
+         if image.size[0] > 512 or image.size[1] > 512:
+             image.thumbnail((512, 512), Image.Resampling.LANCZOS)
+
+         inputs = processor(image, return_tensors="pt")
+
+         if device == "cuda":
+             inputs = {k: v.to(device) for k, v in inputs.items()}
+
+         with torch.no_grad():
+             generated_ids = blip_model.generate(
+                 **inputs,
+                 max_new_tokens=50,
+                 num_beams=3,
+                 do_sample=True,
+                 temperature=0.7
+             )
+
+         caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+         return caption
+
+     except Exception as e:
+         print(f"Image captioning error: {str(e)}")
+         return "Image caption generation failed"
+
+ def generate_analysis(caption):
+     """Generate an artwork analysis."""
+     if model is None or tokenizer is None:
+         # No chat model available, fall back to a canned analysis template
+         return f"""
+ Based on the image content: {caption}
+
+ This artwork displays a distinctive visual expressiveness. Compositionally, it reflects the artist's careful arrangement of space.
+ Its use of color shows the artist's chromatic sensitivity and expressive technique.
+ Overall, this is a work of artistic value that rewards further appreciation and study.
+
+ Note: simplified analysis mode is active; for a detailed analysis, please wait for the chat model to finish loading.
+ """.strip()
+
+     try:
+         prompt = f"This is a work of art, described as: {caption}. Please give a detailed introduction and analysis of this artwork in Chinese."
+
+         # Check the model type
+         if hasattr(model, 'chat'):
+             # ChatGLM model
+             response, _ = model.chat(tokenizer, prompt, history=[])
+         else:
+             # Generic path for other models
+             inputs = tokenizer.encode(prompt, return_tensors="pt")
+             if device == "cuda":
+                 inputs = inputs.cuda()
+
+             with torch.no_grad():
+                 outputs = model.generate(
+                     inputs,
+                     max_length=inputs.shape[1] + 200,
+                     num_return_sequences=1,
+                     temperature=0.7,
+                     do_sample=True,
+                     pad_token_id=tokenizer.eos_token_id
+                 )
+
+             response = tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)
+
+         return response
+
+     except Exception as e:
+         print(f"Analysis generation error: {str(e)}")
+         return f"Generating an analysis for image content '{caption}' ran into a problem, please retry."
+
+ def on_image_upload(image):
+     """Handle an image upload."""
+     if image is None:
+         return [], []
+
+     try:
+         print("🖼️ Processing image...")
+         history = []
+         chat_history = []
+
+         # Generate an image caption
+         caption = describe_image(image)
+         print(f"Image caption: {caption}")
+
+         # Generate the analysis
+         analysis = generate_analysis(caption)
+         chat_history.append([image, analysis])
+
+         return chat_history, history
+
+     except Exception as e:
+         print(f"Image processing error: {str(e)}")
+         return [[None, "Image processing failed, please upload it again."]], []
+
+ def on_user_message(user_message, chat_history, history):
+     """Handle a user message."""
+     if not user_message or not user_message.strip():
+         yield chat_history or [], history or []
+         return
+
+     chat_history = chat_history or []
+     history = history or []
+
+     if model is None or tokenizer is None:
+         chat_history.append([user_message, "Chat is temporarily unavailable; only image analysis is supported."])
+         yield chat_history, history
+         return
+
+     try:
+         chat_history.append([user_message, ""])
+
+         # Check the model type and generate a reply
+         if hasattr(model, 'stream_chat'):
+             # ChatGLM streaming reply
+             for output, new_history in model.stream_chat(tokenizer, user_message, history):
+                 chat_history[-1][1] = output
+                 yield chat_history, new_history
          else:
+             # Simple reply for other models
+             response = generate_analysis(user_message)  # reuse the analysis helper
+             chat_history[-1][1] = response
+             yield chat_history, history

      except Exception as e:
+         print(f"Chat error: {str(e)}")
+         chat_history[-1][1] = "Chat generation failed, please retry."
+         yield chat_history, history

+ def clear_chat():
+     return [], []
+
+ # Build the interface
+ def create_interface():
+     with gr.Blocks(
+         title="AI Artwork Guide",
+         theme=gr.themes.Soft()
+     ) as demo:
          gr.HTML("""
+             <div style="text-align: center; margin-bottom: 20px;">
+                 <h1>🎨 AI Artwork Guide</h1>
+                 <p>Upload an artwork image to get a professional art analysis</p>
              </div>
          """)
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 image_input = gr.Image(
+                     label="📤 Upload an artwork image",
+                     type="pil",
+                     height=350
+                 )
+                 clear_btn = gr.Button("🗑️ Clear conversation", variant="secondary")
+
+                 # Model status display
+                 status_display = gr.HTML(
+                     "<div style='padding:10px; background:#f0f0f0; border-radius:5px;'>"
+                     "<b>Model status:</b> loading..."
+                     "</div>"
+                 )
+
+             with gr.Column(scale=2):
+                 chatbot = gr.Chatbot(
+                     label="🤖 AI Analyst",
+                     height=500
+                 )
+
+                 user_input = gr.Textbox(
+                     label="💬 Ask a follow-up",
+                     placeholder="Enter a question about the artwork...",
+                     lines=2
+                 )
+
+         state = gr.State([])
+
+         # Event bindings
+         image_input.upload(
+             fn=on_image_upload,
+             inputs=image_input,
+             outputs=[chatbot, state]
+         )
+
+         user_input.submit(
+             fn=on_user_message,
+             inputs=[user_input, chatbot, state],
+             outputs=[chatbot, state]
+         )
+
+         user_input.submit(lambda: "", inputs=[], outputs=[user_input])
+         clear_btn.click(fn=clear_chat, inputs=[], outputs=[chatbot, state])
+
+     return demo, status_display
+
+ # Main program
+ if __name__ == "__main__":
+     print("🚀 Starting the AI Artwork Guide...")
+
+     demo, status_display = create_interface()
+
+     # Load the models
+     model_status = load_models()
+
+     if model_status is True:
+         status_msg = "✅ All models loaded"
+         print(status_msg)
+     elif model_status == "partial":
+         status_msg = "⚠️ Vision model available, chat features limited"
+         print(status_msg)
+     else:
+         status_msg = "❌ Model loading failed"
+         print(status_msg)
+
+     # Launch the app
+     try:
+         demo.launch(
+             share=False,
+             show_error=True,
+             quiet=False
+         )
+     except Exception as e:
+         print(f"Startup failed: {str(e)}")
+         with gr.Blocks() as error_demo:
+             gr.HTML(f"<h2>Startup failed</h2><p>{str(e)}</p>")
+         error_demo.launch()
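
The new version's chat path hinges on a capability check: ChatGLM models loaded with trust_remote_code=True expose chat() and stream_chat(), while ordinary Hugging Face models only offer generate(). A minimal sketch of that dispatch, factored out of generate_analysis() above; the reply() helper name is an illustrative addition, and note that the commit's fallback loads DialoGPT via AutoModel, which returns a headless base model, so the generate() branch below assumes the model was loaded with a generation head (e.g. AutoModelForCausalLM):

import torch

def reply(model, tokenizer, prompt, device="cpu"):
    """Route a prompt through ChatGLM's chat() API when present, else through generate()."""
    if hasattr(model, "chat"):  # ChatGLM-style API provided by the repo's remote code
        response, _history = model.chat(tokenizer, prompt, history=[])
        return response
    # Generic path: encode, generate, then decode only the newly generated tokens
    inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_length=inputs.shape[1] + 200,
            do_sample=True,
            temperature=0.7,
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0][inputs.shape[1]:], skip_special_tokens=True)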