import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
import warnings
import weave

# Initialize Weave tracing for the "Faseeh" project
weave.init("Faseeh")

warnings.filterwarnings("ignore")
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("Abdulmohsena/Faseeh")
model = AutoModelForSeq2SeqLM.from_pretrained("Abdulmohsena/Faseeh").to(device)

class Faseeh(weave.Model):

    @weave.op()
    def translate(self, input_data: str, temperature: float = 0.1):
        # Tokenize the input and move it to the same device as the model
        inputs = tokenizer(input_data, return_tensors='pt').to(device)
        outputs = model.generate(**inputs,
                                 temperature=temperature,
                                 do_sample=True)[0]
        prediction = tokenizer.decode(outputs, skip_special_tokens=True)

        return prediction

faseeh = Faseeh()

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Faseeh: The Eloquent Arabic Translator")
    
    # Input text box
    input_text = gr.Textbox(label="Input Text", placeholder="Enter text to translate from English to Classical Arabic")

    # Output text box (label is Arabic for "Enter text to translate from English into Classical Arabic")
    output_text = gr.Textbox(label="ادخل نصا لتترجمه من الأنجليزية الى العربية الفصيحة")

    # Button to trigger translation
    translate_btn = gr.Button("Translate")

    # Button action
    translate_btn.click(faseeh.translate, inputs=input_text, outputs=output_text)

# Launch the Gradio app
if __name__ == "__main__":
    demo.launch()
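
# Example usage outside the UI (a sketch; the sample sentence is illustrative):
# calling the op directly returns the translated string and, because weave.init()
# was called above, logs a trace to the "Faseeh" Weave project.
#
#   result = faseeh.translate("Patience is the key to relief.", temperature=0.1)
#   print(result)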