# -*- coding: utf-8 -*-
"""Application.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/148du8431_JkTaH-totdocC2aUXzOWimL
"""

import gradio as gr
from transformers import BertTokenizer, TFBertForSequenceClassification
import tensorflow as tf

# Load the Greek BERT tokenizer
tokenizer = BertTokenizer.from_pretrained("nlpaueb/bert-base-greek-uncased-v1")

# Load the fine-tuned sarcasm classification model (local weights)
model = TFBertForSequenceClassification.from_pretrained('fine-tuned-bert-gr.h5')

def check_sarcasm(sentence):
    # Tokenize the headline and run it through the fine-tuned classifier
    tf_batch = tokenizer(sentence, max_length=128, padding=True, truncation=True, return_tensors='tf')
    tf_outputs = model(**tf_batch)
    tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
    pred_label = int(tf.argmax(tf_predictions, axis=1).numpy()[0])

    # Label 1 corresponds to the "sarcastic" class
    if pred_label == 1:
        return "Sarcastic"
    else:
        return "Not sarcastic"

# Example usage (the Greek headline translates to
# "Mexico: 25 dead after a bus falls off a cliff")
# sentence = "Μεξικό: 25 νεκροί από την πτώση λεωφορείου στον γκρεμό"
# result = check_sarcasm(sentence)
# print(result)
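
# Illustrative helper (not part of the original script; a minimal sketch): returns the
# softmax probability for the assumed "sarcastic" class (label index 1) instead of a
# hard label, reusing the same tokenize -> model -> softmax flow as check_sarcasm above.
def sarcasm_probability(sentence):
    tf_batch = tokenizer(sentence, max_length=128, padding=True, truncation=True, return_tensors='tf')
    tf_outputs = model(**tf_batch)
    tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
    # Probability assigned to class index 1 (assumed to be "sarcastic")
    return float(tf_predictions[0, 1])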


# Create a Gradio interface
iface = gr.Interface(
    fn=check_sarcasm,
    inputs="text",
    outputs="text",
    title="Sarcasm Detection",
    description="Enter a headline from the Greek news and check if it's sarcastic."
)

# Launch the interface, binding to all network interfaces
# (server_name belongs to launch(), not the Interface constructor)
iface.launch(server_name="0.0.0.0")