sileod committed
Commit df02321 (verified)
Parent: b0bab2b

Update app.py

Files changed (1):
  1. app.py (+3 −3)
app.py CHANGED
@@ -127,7 +127,7 @@ def process_input(text_input, labels_or_premise, mode):
     # Global prediction
     global_pred = nli_classifier([{"text": text_input, "text_pair": labels_or_premise}], return_all_scores=True)[0]
     global_results = {pred['label']: pred['score'] for pred in global_pred}
-    global_label = max(global_results.items(), key=lambda x: x[1])[0]
+    global_label, global_confidence = max(global_results.items(), key=lambda x: x[1])
 
     # Sentence-level analysis
     sentences = sent_tokenize(text_input)
@@ -143,7 +143,7 @@ def process_input(text_input, labels_or_premise, mode):
             'scores': sent_scores
         })
 
-    analysis_html = create_analysis_html(sentence_results, global_label)
+    analysis_html = create_analysis_html(sentence_results, global_label, global_confidence)
     return global_results, analysis_html
 
 def update_interface(mode):
@@ -168,7 +168,7 @@ def update_interface(mode):
     else:  # Long Context NLI
         return (
             gr.update(
-                label="🔎 Global Hypothesis",
+                label="🔎 Hypothesis",
                 placeholder="Enter a hypothesis to test against the full context...",
                 value=long_context_examples[0][1]
             ),
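
The net effect of the first two hunks is that the confidence of the winning label is now unpacked alongside the label itself and forwarded to create_analysis_html. A minimal standalone sketch of that unpacking pattern, using a hard-coded score dictionary in place of the real nli_classifier output (the pipeline call and the create_analysis_html signature are taken from the diff above; everything else here is illustrative):

```python
# Illustrative scores, standing in for the nli_classifier output in app.py.
global_results = {"entailment": 0.81, "neutral": 0.14, "contradiction": 0.05}

# Before: only the winning label was kept.
global_label = max(global_results.items(), key=lambda x: x[1])[0]

# After: the same max() call is unpacked into label *and* confidence,
# so the confidence can be passed on to create_analysis_html(...).
global_label, global_confidence = max(global_results.items(), key=lambda x: x[1])

print(global_label, round(global_confidence, 2))  # entailment 0.81
```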