KavinduHansaka committed · verified
Commit 2e4f9f7 · Parent: e311ddb

Create app.py

Files changed (1): app.py +28 -0
app.py ADDED
@@ -0,0 +1,28 @@
+ import gradio as gr
+ from detoxify import Detoxify
+ import pandas as pd
+
+ # Load the model once at startup so each request only pays for inference
+ model = Detoxify('original')
+
+ def classify_toxicity(text):
+     if not text.strip():
+         # gr.Dataframe cannot render a bare string, so return the
+         # message as a one-cell table instead
+         return pd.DataFrame({"Message": ["Please enter a valid comment."]})
+
+     results = model.predict(text)
+     # Build two explicit columns so the label names survive Gradio's table rendering
+     df = pd.DataFrame(list(results.items()), columns=["Label", "Score"]).round(4)
+     return df
+
+ iface = gr.Interface(
+     fn=classify_toxicity,
+     inputs=gr.Textbox(label="Enter a Comment", lines=3, placeholder="Example: You are so annoying..."),
+     outputs=gr.Dataframe(label="Toxicity Classification"),
+     title="💬 Toxic Comment Classifier",
+     description="Classifies input comments as toxic, severe toxic, obscene, threat, insult, and identity hate using Detoxify (unitary/toxic-bert)."
+ )
+
+ if __name__ == "__main__":
+     iface.launch()
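
For reference, Detoxify's predict() returns a plain dict mapping each label to a score, and the table in the app is just that dict tabulated. A minimal standalone sketch of the call the app wraps (the example strings are hypothetical, and exact label key names can vary between Detoxify releases):

    from detoxify import Detoxify

    # predict() on a single string returns {label: score} with scores in [0, 1]
    model = Detoxify('original')
    scores = model.predict("You are so annoying...")
    for label, score in scores.items():
        print(f"{label}: {score:.4f}")

    # predict() also accepts a list of strings; each dict value then becomes
    # a list of scores, one per input comment
    batch_scores = model.predict(["first comment", "second comment"])

On a Hugging Face Space, app.py would typically sit next to a requirements.txt listing gradio, detoxify, and pandas; that file is not part of this commit.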