lihongze8 committed
Commit c442fd5 · verified · 1 Parent(s): 38733e2

Update app.py

Files changed (1)
  1. app.py +12 -17
app.py CHANGED
@@ -1,31 +1,26 @@
 import os
 import subprocess
 import sys
+import json
 
-# Clone the code and set up the environment
+# Set up the environment
 def setup_environment():
-    # Check whether the directory already exists
     if not os.path.exists("skywork-o1-prm-inference"):
         print("Cloning repository...")
         subprocess.run(["git", "clone", "https://github.com/SkyworkAI/skywork-o1-prm-inference.git"], check=True)
-
-    # Add the repository to the Python path
     repo_path = os.path.abspath("skywork-o1-prm-inference")
     if repo_path not in sys.path:
         sys.path.append(repo_path)
         print(f"Added {repo_path} to Python path")
 
-# Set up the environment
 setup_environment()
 
-# Now the required modules can be imported
 import gradio as gr
 from transformers import AutoTokenizer
 from model_utils.prm_model import PRM_MODEL
 from model_utils.io_utils import prepare_input, prepare_batch_input_for_model, derive_step_rewards
 import torch
 
-# Initialize the model and tokenizer
 model_id = "Skywork/Skywork-o1-Open-PRM-Qwen-2.5-1.5B"
 tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
 model = PRM_MODEL.from_pretrained(model_id).to("cpu").eval()
@@ -33,9 +28,7 @@ model = PRM_MODEL.from_pretrained(model_id).to("cpu").eval()
 def evaluate(problem, response):
     try:
         processed_data = prepare_input(problem, response, tokenizer=tokenizer, step_token="\n")
-        input_ids, steps, reward_flags = [processed_data]
-
-        input_ids, attention_mask, reward_flags = prepare_batch_input_for_model(
+        input_ids, steps, reward_flags = [processed_data]
+        input_ids, attention_mask, reward_flags = prepare_batch_input_for_model(
             input_ids,
             reward_flags,
             tokenizer.pad_token_id
@@ -53,9 +46,10 @@ def evaluate(problem, response):
             return_probs=True
         )
         step_rewards = derive_step_rewards(rewards, reward_flags)
-        return step_rewards[0].tolist()
+        # Make sure a valid JSON string is returned
+        return json.dumps(step_rewards[0].tolist())
     except Exception as e:
-        return str(e)
+        return json.dumps({"error": str(e)})
 
 # Create the Gradio interface
 iface = gr.Interface(
@@ -64,16 +58,17 @@ iface = gr.Interface(
         gr.Textbox(label="Problem", lines=4),
         gr.Textbox(label="Response", lines=8)
     ],
-    outputs=gr.JSON(label="Step Rewards"),
+    outputs=gr.JSON(),
    title="Problem Response Evaluation",
    description="Enter a problem and its response to get step-wise rewards",
    examples=[
        [
-            "Janet's ducks lay 16 eggs per day. She eats three for breakfast every morning and bakes muffins for her friends every day with four. She sells the remainder at the farmers' market daily for $2 per fresh duck egg. How much in dollars does she make every day at the farmers' market?",
-            "To determine how much money Janet makes every day at the farmers' market, we need to follow these steps:\n1. Calculate the total number of eggs laid by the ducks per day.\n Janet's ducks lay 16 eggs per day.\n2. Determine the number of eggs Janet uses each day.\n - She eats 3 eggs for breakfast every morning.\n - She bakes muffins for her friends every day with 4 eggs.\n So, the total number of eggs used per day is:\n 3 + 4 = 7 eggs\n3. Calculate the number of eggs Janet sells at the farmers' market each day.\n Subtract the number of eggs used from the total number of eggs laid:\n 16 - 7 = 9 eggs\n4. Determine how much money Janet makes from selling the eggs.\n She sells each egg for $2, so the total amount of money she makes is:\n 9 × 2 = 18 dollars\nTherefore, the amount of money Janet makes every day at the farmers' market is $18."
+            "Janet's ducks lay 16 eggs per day...",
+            "To determine how much money Janet makes..."
        ]
-    ]
+    ],
+    cache_examples=False  # Disable example caching
 )
 
 # Launch the interface
-iface.launch()
+iface.launch(server_name="0.0.0.0")
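A note on the @@ -33,9 +28,7 @@ hunk: the new version still unpacks with input_ids, steps, reward_flags = [processed_data], but [processed_data] is a one-element list and cannot be unpacked into three names. Below is a minimal sketch of how that block is usually written for a single example; the helper call shapes (prepare_input returning a triple, prepare_batch_input_for_model taking lists plus the pad token id) are inferred from the lines visible in this diff and from the skywork-o1-prm-inference examples, so treat them as assumptions rather than the verified upstream API.

from model_utils.io_utils import prepare_input, prepare_batch_input_for_model

# Hedged sketch only: the call shapes are inferred from the diff above, not verified upstream.
processed_data = prepare_input(problem, response, tokenizer=tokenizer, step_token="\n")
input_ids, steps, reward_flags = processed_data            # prepare_input yields a triple
input_ids, attention_mask, reward_flags = prepare_batch_input_for_model(
    [input_ids],                                           # the batching helper expects lists (batch of one)
    [reward_flags],
    tokenizer.pad_token_id
)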
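The json.dumps changes on both return paths, together with the plain gr.JSON() output, are about always handing Gradio something JSON-serializable, even in the error case. A small self-contained illustration of the same pattern, using numpy values as a stand-in for whatever derive_step_rewards actually returns (that type is an assumption here):

import json
import numpy as np

def to_json_payload(step_rewards_row):
    # numpy scalars and arrays are not directly JSON serializable;
    # converting to a plain Python list of floats first makes json.dumps safe.
    try:
        return json.dumps(np.asarray(step_rewards_row, dtype=float).tolist())
    except Exception as e:
        # mirror the app's error path: still return valid JSON
        return json.dumps({"error": str(e)})

print(to_json_payload(np.array([0.71, 0.64, 0.58])))   # -> "[0.71, 0.64, 0.58]"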
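Finally, iface.launch(server_name="0.0.0.0") binds to all interfaces so the app is reachable from outside the container, and cache_examples=False keeps Gradio from running the example pair through the 1.5B PRM at startup. For completeness, a hedged example of calling the running app with gradio_client; the URL is a placeholder and /predict is only the usual default endpoint name for a gr.Interface, so verify both against the actual Space.

import json
from gradio_client import Client

# Hypothetical client call: replace the URL with the real Space address and
# confirm the endpoint name with client.view_api() before relying on it.
client = Client("https://<your-space>.hf.space")
result = client.predict(
    "Janet's ducks lay 16 eggs per day...",           # Problem textbox
    "To determine how much money Janet makes...",     # Response textbox
    api_name="/predict",
)
# evaluate() returns a JSON string, so decode it if the client hands it back raw.
step_rewards = json.loads(result) if isinstance(result, str) else result
print(step_rewards)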