Update app.py

app.py
CHANGED
@@ -16,85 +16,112 @@ def verify_environment():
     if not find_spec(package):
         raise ModuleNotFoundError(f"Missing critical dependency: {package}")
 
+    # Check environment variables
+    if "HF_TOKEN" not in os.environ:
+        print("Warning: the HF_TOKEN environment variable is not set; model calls may fail")
+
     # Print version information
     try:
         from importlib.metadata import version
         print(f"Tenacity version: {version('tenacity')}")
+        print(f"Gradio version: {version('gradio')}")
     except ImportError:
         from pkg_resources import get_distribution
         print(f"Tenacity version: {get_distribution('tenacity').version}")
+        print(f"Gradio version: {get_distribution('gradio').version}")
 
 if __name__ == "__main__":
     verify_environment()
-# Original application code...
-
-# Model-calling code, modified to add detailed logging
-def model_pipeline(input_text):
-    try:
-        print(f"\n--- Inference started ---\nInput: {input_text}")
-
-        # Verify that the model loaded
-        if not hasattr(model, "generate"):
-            raise AttributeError("Model was not loaded correctly")
-
-        # Preprocess the input
-        inputs = tokenizer(input_text, return_tensors="pt")
-        print(f"Input tensor shape: {inputs['input_ids'].shape}")
-
-        # Run inference
-        outputs = model.generate(**inputs, max_new_tokens=50)
-        print(f"Output tensor shape: {outputs.shape}")
-
-        # Decode the result
-        decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
-        print(f"Decoded result: {decoded[:100]}...")
-
-        return decoded
-    except Exception as e:
-        print(f"\n!!! Inference error !!!\nType: {type(e)}\nDetails: {str(e)}\n")
-        return "[Internal error]"
-
-
 
 # Use the Hugging Face Inference API to call a hosted model (no local loading required)
-# Correct approach 1: read the token from an environment variable
 import os
-client = InferenceClient(token=os.environ["HF_TOKEN"])
-
-def medical_chat(user_input, history):
-    # Build the medical-dialogue prompt
-    prompt = f"患者:{user_input}\n医生:"
-
-    # Call the hosted model (the example uses Microsoft's BioGPT)
 from tenacity import retry, stop_after_attempt, wait_exponential
 
+# Fetch the token safely, falling back to an empty string if it is missing
+hf_token = os.environ.get("HF_TOKEN", "")
+client = InferenceClient(token=hf_token)
+
 @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=10))
 def safe_model_call(prompt):
-
-
-
-
-
-
-
-
-
-
-
-
-
-    #
-
-
-    # Apply the safety filter
-    if "死亡" in doctor_response or "癌症" in doctor_response:
-        return "⚠️ Please contact a medical facility in person for a professional diagnosis!"
+    try:
+        return client.text_generation(
+            prompt=prompt,
+            model="microsoft/BioGPT-Large",
+            max_new_tokens=150,
+            temperature=0.7,
+            repetition_penalty=1.2
+        )
+    except Exception as e:
+        print(f"Model call failed: {str(e)}")
+        raise
+
+def medical_chat(user_input, history):
+    # Build the medical-dialogue prompt (患者 = patient, 医生 = doctor)
+    prompt = f"患者:{user_input}\n医生:"
 
-
+    try:
+        # Call the hosted model
+        print(f"Prompt sent to the model: {prompt}")
+        response = safe_model_call(prompt)
+        print(f"Raw model response: {response}")
+
+        # Check for an empty response
+        if not response:
+            print("Model returned an empty response")
+            return "System error: the model returned an empty response"
+
+        # Check the response type and handle it accordingly
+        if isinstance(response, str):
+            # A string response can be used directly
+            doctor_response = response
+        else:
+            # Otherwise try to extract the text from the object;
+            # this depends on InferenceClient's return type
+            print(f"Response type: {type(response)}")
+            try:
+                # Try the likely attributes for the generated text
+                if hasattr(response, 'generated_text'):
+                    doctor_response = response.generated_text
+                elif hasattr(response, 'text'):
+                    doctor_response = response.text
+                else:
+                    # Fall back to converting the response to a string
+                    doctor_response = str(response)
+            except Exception as e:
+                print(f"Error while processing the response: {str(e)}")
+                return f"System error: could not process the model response - {str(e)}"
+
+        print(f"Processed response: {doctor_response}")
+
+        # If the response does not contain "医生:" ("Doctor:"), return it unchanged
+        if "医生:" not in doctor_response:
+            # Apply the safety filter
+            if "死亡" in doctor_response or "癌症" in doctor_response:
+                return "⚠️ Please contact a medical facility in person for a professional diagnosis!"
+            return doctor_response
+
+        # Extract the doctor's part of the reply
+        doctor_response = doctor_response.split("医生:")[-1].strip()
+
+        # Apply the safety filter (keywords: 死亡 "death", 癌症 "cancer")
+        if "死亡" in doctor_response or "癌症" in doctor_response:
+            return "⚠️ Please contact a medical facility in person for a professional diagnosis!"
+
+        return doctor_response
+    except Exception as e:
+        print(f"Error while calling the model: {str(e)}")
+        return f"System error: {str(e)}"
 
 # Build the Gradio interface
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown("# 🏥 Online Medical Consultation System (AI Edition)")
+    gr.Markdown("### System status:")
+
+    # Display system status
+    hf_token_status = "✅ set" if "HF_TOKEN" in os.environ else "❌ not set"
+    gr.Markdown(f"- HF_TOKEN: {hf_token_status}")
+    gr.Markdown(f"- HF_ENDPOINT: {os.environ.get('HF_ENDPOINT', 'default')}")
+
     gr.ChatInterface(
         fn=medical_chat,
         examples=["头痛三天了怎么办?", "接种疫苗后发烧正常吗?"],  # "Headache for three days, what to do?" / "Is fever after vaccination normal?"
@@ -102,7 +129,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     )
 
 # Launch the app (Hugging Face Spaces handles this automatically)
-
+print("Starting the app...")
+demo.launch(debug=True)  # enable debug mode for more detailed error output
 
 
 
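For reference, the retry policy used on safe_model_call allows three attempts in total (stop_after_attempt(3)) and sleeps exponentially between them; wait_exponential(multiplier=1, min=2, max=10) works out to roughly 2 s and then 4 s here. Once the attempts are exhausted, tenacity raises RetryError rather than the original exception. A minimal standalone sketch of that behavior, using a hypothetical always-failing flaky_call:

import time
from tenacity import retry, stop_after_attempt, wait_exponential, RetryError

calls = []

@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=2, max=10))
def flaky_call():
    # Hypothetical stand-in for the Inference API call; always fails
    calls.append(time.monotonic())
    raise RuntimeError("simulated transient failure")

try:
    flaky_call()
except RetryError:
    waits = [round(b - a) for a, b in zip(calls, calls[1:])]
    print(f"attempts: {len(calls)}, waits: {waits}")  # expected: attempts: 3, waits: [2, 4]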
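The type-checking branches in medical_chat reflect how InferenceClient.text_generation behaves in recent huggingface_hub releases: by default it returns a plain str, while passing details=True yields a richer object exposing generated_text. A short sketch, assuming a valid HF_TOKEN and that the microsoft/BioGPT-Large endpoint is reachable:

import os
from huggingface_hub import InferenceClient

client = InferenceClient(token=os.environ.get("HF_TOKEN", ""))

# Default call: the result is the generated continuation as a plain string
plain = client.text_generation("患者:头痛三天了\n医生:", model="microsoft/BioGPT-Large", max_new_tokens=50)
print(type(plain))  # <class 'str'>

# With details=True the result carries metadata and exposes .generated_text
detailed = client.text_generation("患者:头痛三天了\n医生:", model="microsoft/BioGPT-Large", max_new_tokens=50, details=True)
print(detailed.generated_text)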
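The try/except around the version prints in verify_environment follows a common pattern: importlib.metadata ships with Python 3.8+, while pkg_resources comes from setuptools and is kept only as a legacy fallback. A reusable sketch of the same lookup (package_version is a hypothetical helper, not part of app.py):

def package_version(name: str) -> str:
    # Preferred path: stdlib metadata lookup (Python 3.8+)
    try:
        from importlib.metadata import version
        return version(name)
    except ImportError:
        # Legacy fallback via setuptools
        from pkg_resources import get_distribution
        return get_distribution(name).version

print(package_version("tenacity"))
print(package_version("gradio"))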