Delete app_default.py

app_default.py (DELETED, +0 -213)
# app.py
import gradio as gr
from classifier import classify_toxic_comment

# Clear function for resetting the UI: one value per component in clear_btn's outputs list
def clear_inputs():
    return "", "", 0, "", [], "", "", "", "", "", 0, "", "", "", "", "", ""

# Custom CSS for styling
custom_css = """
.gr-button-primary {
    background-color: #4CAF50 !important;
    color: white !important;
}
.gr-button-secondary {
    background-color: #f44336 !important;
    color: white !important;
}
.gr-textbox textarea {
    border: 2px solid #2196F3 !important;
    border-radius: 8px !important;
}
.gr-slider {
    background-color: #e0e0e0 !important;
    border-radius: 10px !important;
}
"""

# Main UI
with gr.Blocks(theme=gr.themes.Soft(), css=custom_css) as demo:
    gr.Markdown(
        """
        # Toxic Comment Classifier
        Enter a comment below to check whether it is toxic or non-toxic. This app uses a fine-tuned XLM-RoBERTa model to classify comments as part of a four-stage pipeline for automated toxic comment moderation.
        """
    )

    with gr.Row():
        with gr.Column(scale=3):
            comment_input = gr.Textbox(
                label="Your Comment",
                placeholder="Type your comment here...",
                lines=3,
                max_lines=5
            )
        with gr.Column(scale=1):
            submit_btn = gr.Button("Classify Comment", variant="primary")
            clear_btn = gr.Button("Clear", variant="secondary")

    gr.Examples(
        examples=[
            "I love this community, it's so supportive!",
            "You are an idiot and should leave this platform.",
            "This app is amazing, great work!"
        ],
        inputs=comment_input,
        label="Try these examples:"
    )

    with gr.Row():
        with gr.Column(scale=2):
            prediction_output = gr.Textbox(label="Prediction", placeholder="Prediction will appear here...")
            toxicity_output = gr.Textbox(label="Toxicity Score", placeholder="Toxicity score will appear here...")
            bias_output = gr.Textbox(label="Bias Score", placeholder="Bias score will appear here...")
        with gr.Column(scale=1):
            confidence_output = gr.Slider(
                label="Confidence",
                minimum=0,
                maximum=1,
                value=0,
                interactive=False
            )

    with gr.Row():
        label_display = gr.HTML()
        threshold_display = gr.HTML()

    with gr.Accordion("Paraphrased Output (if Toxic)", open=False):
        paraphrased_comment_output = gr.Textbox(label="Paraphrased Comment", placeholder="Paraphrased comment will appear here if the input is toxic...")
        paraphrased_prediction_output = gr.Textbox(label="Paraphrased Prediction", placeholder="Prediction will appear here...")
        paraphrased_toxicity_output = gr.Textbox(label="Paraphrased Toxicity Score", placeholder="Toxicity score will appear here...")
        paraphrased_bias_output = gr.Textbox(label="Paraphrased Bias Score", placeholder="Bias score will appear here...")
        paraphrased_confidence_output = gr.Slider(
            label="Paraphrased Confidence",
            minimum=0,
            maximum=1,
            value=0,
            interactive=False
        )
        paraphrased_label_display = gr.HTML()
        semantic_similarity_output = gr.Textbox(label="Semantic Similarity", placeholder="Semantic similarity score will appear here...")
        emotion_shift_output = gr.Textbox(label="Emotion Shift", placeholder="Emotion shift will appear here...")
        empathy_score_output = gr.Textbox(label="Empathy Score", placeholder="Empathy score will appear here...")

    with gr.Accordion("Prediction History", open=False):
        history_output = gr.JSON(label="Previous Predictions")

    with gr.Accordion("Provide Feedback", open=False):
        feedback_input = gr.Radio(
            choices=["Yes, the prediction was correct", "No, the prediction was incorrect"],
            label="Was this prediction correct?"
        )
        feedback_comment = gr.Textbox(label="Additional Comments (optional)", placeholder="Let us know your thoughts...")
        feedback_submit = gr.Button("Submit Feedback")
        feedback_output = gr.Textbox(label="Feedback Status")

    def handle_classification(comment, history):
        if history is None:
            history = []
        (
            prediction, confidence, color, toxicity_score, bias_score,
            paraphrased_comment, paraphrased_prediction, paraphrased_confidence,
            paraphrased_color, paraphrased_toxicity_score, paraphrased_bias_score,
            semantic_similarity, emotion_shift, empathy_score
        ) = classify_toxic_comment(comment)

        history.append({
            "comment": comment,
            "prediction": prediction,
            "confidence": confidence,
            "toxicity_score": toxicity_score,
            "bias_score": bias_score,
            "paraphrased_comment": paraphrased_comment,
            "paraphrased_prediction": paraphrased_prediction,
            "paraphrased_confidence": paraphrased_confidence,
            "paraphrased_toxicity_score": paraphrased_toxicity_score,
            "paraphrased_bias_score": paraphrased_bias_score,
            "semantic_similarity": semantic_similarity,
            "emotion_shift": emotion_shift,
            "empathy_score": empathy_score
        })

        threshold_message = "High Confidence" if confidence >= 0.7 else "Low Confidence"
        threshold_color = "green" if confidence >= 0.7 else "orange"
        # Render the label and threshold as HTML here, so each display component
        # is written exactly once per event (no duplicate output components).
        label_html = f"<span style='color: {color}; font-size: 20px; font-weight: bold;'>{prediction}</span>"
        threshold_html = f"<span style='color: {threshold_color}; font-size: 16px;'>{threshold_message}</span>"
        toxicity_display = f"{toxicity_score} (Scale: 0 to 1, lower is less toxic)" if toxicity_score is not None else "N/A"
        bias_display = f"{bias_score} (Scale: 0 to 1, lower indicates less bias)" if bias_score is not None else "N/A"

        paraphrased_comment_display = paraphrased_comment if paraphrased_comment else "N/A (Comment was non-toxic)"
        paraphrased_prediction_display = paraphrased_prediction if paraphrased_prediction else "N/A"
        paraphrased_confidence_display = paraphrased_confidence if paraphrased_confidence else 0
        paraphrased_toxicity_display = f"{paraphrased_toxicity_score} (Scale: 0 to 1, lower is less toxic)" if paraphrased_toxicity_score is not None else "N/A"
        paraphrased_bias_display = f"{paraphrased_bias_score} (Scale: 0 to 1, lower indicates less bias)" if paraphrased_bias_score is not None else "N/A"
        paraphrased_label_html = f"<span style='color: {paraphrased_color}; font-size: 20px; font-weight: bold;'>{paraphrased_prediction}</span>" if paraphrased_prediction else ""
        semantic_similarity_display = f"{semantic_similarity} (Scale: 0 to 1, higher is better)" if semantic_similarity is not None else "N/A"
        emotion_shift_display = emotion_shift if emotion_shift else "N/A"
        empathy_score_display = f"{empathy_score} (Scale: 0 to 1, higher indicates more empathy)" if empathy_score is not None else "N/A"

        return (
            prediction, confidence, label_html, history, threshold_html,
            toxicity_display, bias_display,
            paraphrased_comment_display, paraphrased_prediction_display, paraphrased_confidence_display,
            paraphrased_toxicity_display, paraphrased_bias_display, paraphrased_label_html,
            semantic_similarity_display, emotion_shift_display, empathy_score_display
        )

    def handle_feedback(feedback, comment):
        return f"Thank you for your feedback: {feedback}\nAdditional comment: {comment}"

    submit_btn.click(
        # Show a loading state: one value per output component below.
        # history_output is deliberately left untouched so the accumulated
        # history survives into the .then() step that reads it as an input.
        fn=lambda: (
            "Classifying...", 0, "", "",
            "Calculating...", "Calculating...",
            "Paraphrasing...", "Calculating...", 0,
            "Calculating...", "Calculating...", "",
            "Calculating...", "Calculating...", "Calculating..."
        ),
        inputs=[],
        outputs=[
            prediction_output, confidence_output, label_display, threshold_display,
            toxicity_output, bias_output,
            paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
            paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
            semantic_similarity_output, emotion_shift_output, empathy_score_output
        ]
    ).then(
        fn=handle_classification,
        inputs=[comment_input, history_output],
        outputs=[
            prediction_output, confidence_output, label_display, history_output, threshold_display,
            toxicity_output, bias_output,
            paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
            paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
            semantic_similarity_output, emotion_shift_output, empathy_score_output
        ]
    )

    feedback_submit.click(
        fn=handle_feedback,
        inputs=[feedback_input, feedback_comment],
        outputs=feedback_output
    )

    clear_btn.click(
        fn=clear_inputs,
        inputs=[],
        outputs=[
            comment_input, prediction_output, confidence_output, label_display, history_output, threshold_display,
            toxicity_output, bias_output,
            paraphrased_comment_output, paraphrased_prediction_output, paraphrased_confidence_output,
            paraphrased_toxicity_output, paraphrased_bias_output, paraphrased_label_display,
            semantic_similarity_output, emotion_shift_output, empathy_score_output
        ]
    )

    gr.Markdown(
        """
        ---
        **About**: This app is part of a four-stage pipeline for automated toxic comment moderation with emotional intelligence via RLHF. Built with ❤️ using Hugging Face and Gradio.
        """
    )

demo.launch()
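
Note: the app imports classify_toxic_comment from a classifier module that is not part of this diff. The sketch below is a hypothetical stand-in, inferred only from the 14-tuple that handle_classification unpacks; its heuristic scoring and paraphrase text are illustrative placeholders, not the Space's actual XLM-RoBERTa model.

# classifier.py: hypothetical stand-in (not shown in this diff).
# It only mirrors the 14-tuple contract that handle_classification unpacks.
def classify_toxic_comment(comment):
    is_toxic = "idiot" in comment.lower()  # placeholder heuristic, not the real model
    prediction = "Toxic" if is_toxic else "Non-Toxic"
    color = "red" if is_toxic else "green"
    confidence = 0.9
    toxicity_score = 0.8 if is_toxic else 0.1
    bias_score = 0.2

    if not is_toxic:
        # No paraphrase is produced for non-toxic comments; the UI shows "N/A".
        return (prediction, confidence, color, toxicity_score, bias_score,
                None, None, None, None, None, None, None, None, None)

    return (
        prediction, confidence, color, toxicity_score, bias_score,
        "I strongly disagree and think you should reconsider.",  # paraphrased_comment
        "Non-Toxic",         # paraphrased_prediction
        0.85,                # paraphrased_confidence
        "green",             # paraphrased_color
        0.1,                 # paraphrased_toxicity_score
        0.1,                 # paraphrased_bias_score
        0.7,                 # semantic_similarity
        "anger -> neutral",  # emotion_shift
        0.6                  # empathy_score
    )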