maiurilorenzo committed
Commit 17cbbcb · verified · 1 Parent(s): c13572e

Create app.py

Files changed (1)
app.py +33 -0
app.py ADDED
@@ -0,0 +1,33 @@
+import gradio as gr
+from transformers import pipeline
+
+# Load the model
+model_name = "maiurilorenzo/misogyny-detection-it"
+classifier = pipeline("text-classification", model=model_name)
+
+# Define the prediction function
+def detect_misogyny(text):
+    result = classifier(text)
+    label = result[0]["label"]
+    score = result[0]["score"]
+    label_readable = "Misogynistic" if label == "LABEL_1" else "Non-Misogynistic"
+    return f"Label: {label_readable} (Confidence: {score:.2f})"
+
+# Create the Gradio interface
+demo = gr.Interface(
+    fn=detect_misogyny,
+    inputs=gr.Textbox(lines=3, placeholder="Enter Italian text here..."),
+    outputs="text",
+    title="Misogyny Detection in Italian",
+    description="This demo uses a fine-tuned BERT model to detect misogynistic content in Italian text. Enter a phrase or sentence, and the model will classify it as 'Misogynistic' or 'Non-Misogynistic' along with a confidence score.",
+    article="""
+    ### About the Model
+    This model is fine-tuned on the AMI (Automatic Misogyny Identification) dataset for binary classification of misogynistic content in Italian.
+    - **Labels:**
+      - `1`: Misogynistic
+      - `0`: Non-Misogynistic
+    - **Source Model:** [dbmdz/bert-base-italian-xxl-uncased](https://huggingface.co/dbmdz/bert-base-italian-xxl-uncased)
+    """
+)
+
+demo.launch()
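
For reference, a minimal sketch of the raw classifier output that detect_misogyny formats, assuming the model follows the standard transformers text-classification output shape (the score shown in the comment is illustrative, not a real prediction; the input sentence is an arbitrary placeholder):

from transformers import pipeline

classifier = pipeline("text-classification", model="maiurilorenzo/misogyny-detection-it")

# The pipeline returns a list with one dict per input,
# e.g. [{"label": "LABEL_1", "score": 0.93}] (illustrative values).
# detect_misogyny maps LABEL_1 -> "Misogynistic" and renders the score
# as "Label: Misogynistic (Confidence: 0.93)".
print(classifier("Testo di esempio"))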