File size: 1,393 Bytes
9758705
 
 
b412188
 
 
9758705
b412188
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9758705
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b412188
9758705
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import gradio as gr
from transformers import pipeline

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
import warnings

# Silence library warnings (e.g. transformers generation/config notices) for a clean demo UI.
warnings.filterwarnings("ignore")
# Run on GPU when available; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the tokenizer and model
# NOTE(review): downloads the "Abdulmohsena/Faseeh" checkpoint from the Hugging Face Hub
# on first run — requires network access and disk cache.
tokenizer = AutoTokenizer.from_pretrained("Abdulmohsena/Faseeh")
model = AutoModelForSeq2SeqLM.from_pretrained("Abdulmohsena/Faseeh").to(device)

def translate(text, temperature=0.1, tries=1):
    """Translate ``text`` into Classical Arabic with the Faseeh seq2seq model.

    Args:
        text: Source text (English) to translate.
        temperature: Sampling temperature; low values keep output close to greedy decoding.
        tries: Number of candidate translations to sample (``num_return_sequences``).

    Returns:
        A single string. When ``tries > 1`` the candidates are joined with newlines.
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=256).to(device)
    # Inference only: disable autograd to cut memory use and speed up generation.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            do_sample=True,
            temperature=temperature,
            num_return_sequences=tries,
        )
    translations = tokenizer.batch_decode(outputs, skip_special_tokens=True)

    # Bug fix: batch_decode returns a list, which the Gradio Textbox rendered
    # as "['...']"; join into one displayable string instead.
    return "\n".join(translations)

# Gradio interface: a minimal one-shot translation UI.
with gr.Blocks() as demo:
    gr.Markdown("# Machine Translation to Classical Arabic")

    # Where the user types the English source text.
    source_box = gr.Textbox(
        label="Input Text",
        placeholder="Enter text to translate from English to Classical Arabic",
    )

    # Where the model's Classical Arabic output appears.
    result_box = gr.Textbox(label="Translated Text")

    # Clicking runs translate() on the source text and shows the result.
    run_button = gr.Button("Translate")
    run_button.click(translate, inputs=source_box, outputs=result_box)

# Launch the Gradio app only when executed as a script.
if __name__ == "__main__":
    demo.launch()