lg3394 committed on
Commit cd880dc · verified · 1 Parent(s): 5ec2d79

Update app.py

Files changed (1)
  1. app.py +5 -1
app.py CHANGED
@@ -105,7 +105,11 @@ def moderate_text(user_text):
     toxic_classification = "Blocked" if toxic_result[0]['label'] == 'LABEL_1' else "Allowed"  # Toxic BERT classifies as "LABEL_1" for toxic
     toxic_severity = toxic_result[0]['score']
 
-    return openai_moderation_result, anthropic_moderation_result, azure_moderation_result, f"Toxic BERT classification: {toxic_classification}, Severity: {toxic_severity}"
+    # Explanation of severity score
+    toxic_explanation = f"Toxic BERT classification: {toxic_classification}, Confidence: {toxic_severity:.2f}"
+
+    return openai_moderation_result, anthropic_moderation_result, azure_moderation_result, toxic_explanation
+
 
 # Create the Gradio interface with updated input and output labels
 iface = gr.Interface(
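
For context, a minimal runnable sketch of how this hunk plausibly sits inside app.py. The classifier stub, the placeholder results for the OpenAI / Anthropic / Azure moderators, and the Gradio input/output labels are assumptions for illustration; only the lines from toxic_classification through the return statement come from the commit itself.

import gradio as gr

def classify_toxicity(user_text):
    # Stand-in for the real Toxic BERT call in app.py (the model setup is not
    # part of this diff); it only has to match the shape the hunk indexes into:
    # [{'label': 'LABEL_1' or 'LABEL_0', 'score': float}]
    return [{'label': 'LABEL_1', 'score': 0.87}]

def moderate_text(user_text):
    # Placeholders for the three hosted moderation results elided from the diff
    openai_moderation_result = "(OpenAI result)"
    anthropic_moderation_result = "(Anthropic result)"
    azure_moderation_result = "(Azure result)"

    toxic_result = classify_toxicity(user_text)
    toxic_classification = "Blocked" if toxic_result[0]['label'] == 'LABEL_1' else "Allowed"  # Toxic BERT classifies as "LABEL_1" for toxic
    toxic_severity = toxic_result[0]['score']

    # Explanation of severity score (the line added by this commit)
    toxic_explanation = f"Toxic BERT classification: {toxic_classification}, Confidence: {toxic_severity:.2f}"

    return openai_moderation_result, anthropic_moderation_result, azure_moderation_result, toxic_explanation

# Create the Gradio interface with updated input and output labels;
# one output component per value returned by moderate_text (labels are guesses)
iface = gr.Interface(
    fn=moderate_text,
    inputs=gr.Textbox(label="Text to moderate"),
    outputs=[
        gr.Textbox(label="OpenAI moderation"),
        gr.Textbox(label="Anthropic moderation"),
        gr.Textbox(label="Azure moderation"),
        gr.Textbox(label="Toxic BERT"),
    ],
)

if __name__ == "__main__":
    iface.launch()

With this change the fourth Gradio output receives a single formatted string instead of an inline f-string in the return, and the raw score is shown as a two-decimal "Confidence" value rather than an unrounded "Severity".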