bergr7f committed on
Commit
061ae36
1 Parent(s): eb8daf9

zerogpu decorator

Browse files
Files changed (1) hide show
  1. app.py +14 -7
app.py CHANGED
@@ -1,13 +1,18 @@
1
  import gradio as gr
 
2
  import pandas as pd
3
  from typing import List, Dict
4
  from flow_judge import Hf, FlowJudge, EvalInput
5
  from flow_judge.metrics import CustomMetric, RubricItem
6
 
7
- try:
8
- model = Hf(flash_attn=False)
9
- except Exception as e:
10
- raise RuntimeError(f"Failed to initialize Hf Model: {e}")
 
 
 
 
11
 
12
  EXAMPLES = [
13
  {
@@ -33,8 +38,9 @@ def populate_fields(example_index: int):
33
  example["evaluation_criteria"],
34
  [[str(i), description] for i, description in enumerate(example["rubric"])]
35
  )
36
-
37
- def evaluate(task_inputs: pd.DataFrame, task_output: pd.DataFrame, evaluation_criteria: str, rubric: pd.DataFrame) -> tuple:
 
38
  # Convert inputs to the expected format
39
  eval_input = EvalInput(
40
  inputs=[{row['Name']: row['Value']} for _, row in task_inputs.iterrows()],
@@ -94,6 +100,7 @@ def reset_evaluation_criteria():
94
  )
95
 
96
  with gr.Blocks() as demo:
 
97
  with gr.Row():
98
  example_buttons = [gr.Button(f"{example['emoji']} Example {i+1}") for i, example in enumerate(EXAMPLES)]
99
 
@@ -177,7 +184,7 @@ with gr.Blocks() as demo:
177
 
178
  evaluate_btn.click(
179
  evaluate,
180
- inputs=[task_inputs, task_output, evaluation_criteria, rubric],
181
  outputs=[feedback, score]
182
  )
183
 
 
1
  import gradio as gr
2
+ import spaces
3
  import pandas as pd
4
  from typing import List, Dict
5
  from flow_judge import Hf, FlowJudge, EvalInput
6
  from flow_judge.metrics import CustomMetric, RubricItem
7
 
8
+
9
+ @spaces.GPU
10
+ def load_model():
11
+ try:
12
+ model = Hf(flash_attn=False)
13
+ return model
14
+ except Exception as e:
15
+ raise RuntimeError(f"Failed to initialize Hf Model: {e}")
16
 
17
  EXAMPLES = [
18
  {
 
38
  example["evaluation_criteria"],
39
  [[str(i), description] for i, description in enumerate(example["rubric"])]
40
  )
41
+
42
+ @spaces.GPU
43
+ def evaluate(model, task_inputs: pd.DataFrame, task_output: pd.DataFrame, evaluation_criteria: str, rubric: pd.DataFrame) -> tuple:
44
  # Convert inputs to the expected format
45
  eval_input = EvalInput(
46
  inputs=[{row['Name']: row['Value']} for _, row in task_inputs.iterrows()],
 
100
  )
101
 
102
  with gr.Blocks() as demo:
103
+ model = load_model()
104
  with gr.Row():
105
  example_buttons = [gr.Button(f"{example['emoji']} Example {i+1}") for i, example in enumerate(EXAMPLES)]
106
 
 
184
 
185
  evaluate_btn.click(
186
  evaluate,
187
+ inputs=[model,task_inputs, task_output, evaluation_criteria, rubric],
188
  outputs=[feedback, score]
189
  )
190