KavinduHansaka committed
Commit 4ad87c5 · verified · 1 Parent(s): e00c4c7

Update app.py

Files changed (1)
  1. app.py +21 -6
app.py CHANGED
@@ -5,23 +5,38 @@ import pandas as pd
 # Load model once
 model = Detoxify('original')
 
+# Threshold for flagging a comment as risky
+TOXICITY_THRESHOLD = 0.7
+
 def classify_multiple(comments):
-    # Split input by newlines and clean
+    # Split input into lines
     comment_list = [c.strip() for c in comments.split('\n') if c.strip()]
     if not comment_list:
         return "Please enter at least one valid comment."
 
-    results = model.predict(comment_list)  # Returns a dict of lists
-
+    # Predict toxicity scores
+    results = model.predict(comment_list)
     df = pd.DataFrame(results, index=comment_list).round(4)
+
+    # Capitalize headers
+    df.columns = [col.replace("_", " ").title().replace(" ", "_") for col in df.columns]
+    df.columns = [col.replace("_", " ") for col in df.columns]
+
+    # Add warning column
+    def check_warning(row):
+        return "⚠️ High Risk" if any(score > TOXICITY_THRESHOLD for score in row) else "✅ Safe"
+
+    df["⚠️ Warning"] = df.apply(check_warning, axis=1)
+
     return df
 
+# UI setup
 iface = gr.Interface(
     fn=classify_multiple,
     inputs=gr.Textbox(lines=8, placeholder="Enter one or more comments, each on a new line..."),
-    outputs=gr.Dataframe(label="Toxicity Predictions"),
-    title="💬 Toxic Comment Classifier (Multi-Comment)",
-    description="Paste one or more comments. Each will be scored for toxicity, severe toxicity, insult, threat, obscene, and identity hate using Detoxify."
+    outputs=gr.Dataframe(label="Toxicity Predictions with Warnings"),
+    title="💬 Toxic Comment Classifier (Multi-Comment, with Warnings)",
+    description="Paste one or more comments. Each comment is scored for toxicity, and flagged as ⚠️ if any label exceeds 0.7."
 )
 
 if __name__ == "__main__":
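
For reference, the thresholding behaviour introduced by this commit can be exercised without loading Detoxify by running the same DataFrame pipeline on a dict of made-up scores. This is a minimal sketch only: the comments and score values below are invented, the column names follow Detoxify's usual output labels, and the two header-rename lines from app.py are collapsed into a single equivalent step.

# Minimal sketch of the warning logic, not part of the committed app.
# Real scores come from Detoxify('original').predict(...); these are invented.
import pandas as pd

TOXICITY_THRESHOLD = 0.7  # same threshold as in the commit

fake_results = {
    "toxicity":        [0.91, 0.02],
    "severe_toxicity": [0.10, 0.01],
    "obscene":         [0.75, 0.00],
    "threat":          [0.05, 0.00],
    "insult":          [0.80, 0.01],
    "identity_attack": [0.03, 0.00],
}
comments = ["you are awful", "have a nice day"]

df = pd.DataFrame(fake_results, index=comments).round(4)

# Prettify headers (equivalent to the two rename lines in app.py)
df.columns = [col.replace("_", " ").title() for col in df.columns]

# Flag any row where a label exceeds the threshold
df["⚠️ Warning"] = df.apply(
    lambda row: "⚠️ High Risk" if any(score > TOXICITY_THRESHOLD for score in row) else "✅ Safe",
    axis=1,
)
print(df)
# Expected: the first row is flagged "⚠️ High Risk", the second "✅ Safe".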