kz209 committed on
Commit
dea4ce7
·
1 Parent(s): 107fbeb
pages/batch_evaluation.py CHANGED
@@ -52,7 +52,7 @@ def process(seed, model_selection, prompt, num=10):
52
  for data in random.choices(dataset, k=num):
53
  dialogue = data['dialogue']
54
  summary = data['summary']
55
- response = generate_answer(dialogue, model, model_selection, prompt)
56
 
57
  rouge_score = metric_rouge_score(response, summary)
58
 
 
52
  for data in random.choices(dataset, k=num):
53
  dialogue = data['dialogue']
54
  summary = data['summary']
55
+ response = generate_answer(dialogue, model_selection, prompt)
56
 
57
  rouge_score = metric_rouge_score(response, summary)
58
 
pages/summarization_playground.py CHANGED
@@ -31,7 +31,7 @@ Back in Boston, Kidd is going to rely on Lively even more. He'll play close to 3
31
  random_label: ""
32
  }
33
 
34
- def generate_answer(sources, model, model_name, prompt):
35
  content = prompt + '\n' + sources + '\n\n'
36
  global __model_on_gpu__
37
 
@@ -50,7 +50,7 @@ def generate_answer(sources, model, model_name, prompt):
50
 
51
  def process_input(input_text, model_selection, prompt):
52
  if input_text:
53
- response = generate_answer(input_text, model, model_selection, prompt)
54
  return f"## Original Article:\n\n{input_text}\n\n## Summarization:\n\n{response}"
55
  else:
56
  return "Please fill the input to generate outputs."
 
31
  random_label: ""
32
  }
33
 
34
+ def generate_answer(sources, model_name, prompt):
35
  content = prompt + '\n' + sources + '\n\n'
36
  global __model_on_gpu__
37
 
 
50
 
51
  def process_input(input_text, model_selection, prompt):
52
  if input_text:
53
+ response = generate_answer(input_text, model_selection, prompt)
54
  return f"## Original Article:\n\n{input_text}\n\n## Summarization:\n\n{response}"
55
  else:
56
  return "Please fill the input to generate outputs."