Seunggg committed on
Commit
0316dcf
·
verified ·
1 Parent(s): 7ea3332

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -18
app.py CHANGED
@@ -8,10 +8,8 @@ import json
8
  model_id = "deepseek-ai/deepseek-coder-1.3b-base"
9
  lora_id = "Seunggg/lora-plant"
10
 
11
- # 加载 tokenizer
12
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
13
 
14
- # 加载基础模型
15
  base = AutoModelForCausalLM.from_pretrained(
16
  model_id,
17
  device_map="auto",
@@ -20,7 +18,6 @@ base = AutoModelForCausalLM.from_pretrained(
20
  trust_remote_code=True
21
  )
22
 
23
- # 加载 LoRA adapter
24
  model = PeftModel.from_pretrained(
25
  base,
26
  lora_id,
@@ -30,7 +27,6 @@ model = PeftModel.from_pretrained(
30
 
31
  model.eval()
32
 
33
- # 创建 pipeline
34
  from transformers import pipeline
35
  pipe = pipeline(
36
  "text-generation",
@@ -52,7 +48,6 @@ def respond(user_input):
52
  sensor_display = get_sensor_data()
53
  if not user_input.strip():
54
  return sensor_display, "请输入植物相关的问题 😊"
55
-
56
  prompt = f"用户提问:{user_input}\n"
57
  try:
58
  sensor_response = requests.get("https://arduino-realtime.onrender.com/api/data", timeout=5)
@@ -60,25 +55,26 @@ def respond(user_input):
60
  if sensor_data:
61
  prompt += f"当前传感器数据:{json.dumps(sensor_data, ensure_ascii=False)}\n"
62
  prompt += "请用更人性化的语言生成建议,并推荐相关植物文献或资料。\n回答:"
63
-
64
  result = pipe(prompt)
65
  full_output = result[0]["generated_text"]
66
  answer = full_output.replace(prompt, "").strip()
67
  except Exception as e:
68
  answer = f"生成建议时出错:{str(e)}"
69
-
70
  return sensor_display, answer
71
 
72
- # Gradio 界面
73
- demo = gr.Interface(
74
- fn=respond,
75
- inputs=gr.Textbox(lines=4, label="植物问题"),
76
- outputs=[
77
- gr.Textbox(label="🧪 当前传感器数据", lines=6, interactive=False),
78
- gr.Textbox(label="🤖 回答建议", lines=8, interactive=False)
79
- ],
80
- title="🌱 植物助手 - 实时联动版",
81
- description="结合 Render 实时传感器数据 + 本地 LoRA 模型,生成更合理建议。"
82
- )
 
 
 
83
 
84
  demo.launch()
 
8
# Base causal-LM on the Hugging Face Hub; the LoRA adapter below is applied on top of it.
model_id = "deepseek-ai/deepseek-coder-1.3b-base"
lora_id = "Seunggg/lora-plant"

# trust_remote_code lets the repo's own modeling code run.
# NOTE(review): presumably needed for the DeepSeek tokenizer — confirm against the model card.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
12
 
 
13
  base = AutoModelForCausalLM.from_pretrained(
14
  model_id,
15
  device_map="auto",
 
18
  trust_remote_code=True
19
  )
20
 
 
21
  model = PeftModel.from_pretrained(
22
  base,
23
  lora_id,
 
27
 
28
  model.eval()
29
 
 
30
  from transformers import pipeline
31
  pipe = pipeline(
32
  "text-generation",
 
48
  sensor_display = get_sensor_data()
49
  if not user_input.strip():
50
  return sensor_display, "请输入植物相关的问题 😊"
 
51
  prompt = f"用户提问:{user_input}\n"
52
  try:
53
  sensor_response = requests.get("https://arduino-realtime.onrender.com/api/data", timeout=5)
 
55
  if sensor_data:
56
  prompt += f"当前传感器数据:{json.dumps(sensor_data, ensure_ascii=False)}\n"
57
  prompt += "请用更人性化的语言生成建议,并推荐相关植物文献或资料。\n回答:"
 
58
  result = pipe(prompt)
59
  full_output = result[0]["generated_text"]
60
  answer = full_output.replace(prompt, "").strip()
61
  except Exception as e:
62
  answer = f"生成建议时出错:{str(e)}"
 
63
  return sensor_display, answer
64
 
65
def auto_update_sensor():
    """Fetch the latest sensor reading for the auto-refreshing sensor box.

    Returns:
        The display string produced by get_sensor_data().

    Returning the raw value (rather than ``gr.Textbox.update(value=...)``)
    updates the bound Textbox in every Gradio version — the per-component
    ``Component.update()`` classmethod was deprecated in 3.x and removed in
    Gradio 4.x, where the old call raises at runtime.
    """
    return get_sensor_data()
67
+
68
with gr.Blocks() as demo:
    # App header: title plus a short description of the Render-sensor + local-LoRA setup.
    gr.Markdown("## 🌱 植物助手 - 实时联动版\n结合 Render 实时传感器数据 + 本地 LoRA 模型,生成更合理建议。")

    # Read-only display of the latest sensor reading.
    sensor_box = gr.Textbox(label="🧪 当前传感器数据", lines=6, interactive=False)
    # Free-form question input; pressing Enter submits it to respond().
    user_input = gr.Textbox(label="植物问题", lines=4)
    # Read-only display of the model-generated advice.
    answer_box = gr.Textbox(label="🤖 回答建议", lines=8, interactive=False)

    # respond() returns (sensor_display, answer), refreshing both output boxes.
    user_input.submit(fn=respond, inputs=user_input, outputs=[sensor_box, answer_box])

    # Auto-refresh the sensor readout every 5 seconds.
    # NOTE(review): `every=` on Blocks.load is the 3.x polling API — confirm the
    # deployed Gradio version supports it.
    demo.load(fn=auto_update_sensor, inputs=None, outputs=sensor_box, every=5)

demo.launch()