AbdulmohsenA committed on
Commit
b412188
·
1 Parent(s): 4c56788
Files changed (1) hide show
  1. app.py +22 -8
app.py CHANGED
@@ -1,14 +1,28 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- # Load the translation pipeline
5
- model_name = "AbdulmohsenA/Faseeh" # Replace with your model name
6
- translator = pipeline("translation_en_to_ar", model=model_name)
7
 
8
- # Define translation function
9
- def translate_text(text):
10
- result = translator(text)
11
- return result[0]['translation_text']
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  # Gradio interface
14
  with gr.Blocks() as demo:
@@ -24,7 +38,7 @@ with gr.Blocks() as demo:
24
  translate_btn = gr.Button("Translate")
25
 
26
  # Button action
27
- translate_btn.click(translate_text, inputs=input_text, outputs=output_text)
28
 
29
  # Launch the Gradio app
30
  if __name__ == "__main__":
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
5
+ import torch
6
+ import warnings
7
 
8
+ warnings.filterwarnings("ignore")
9
+ device = "cuda" if torch.cuda.is_available() else "cpu"
10
+
11
+ # Load the tokenizer and model
12
+ tokenizer = AutoTokenizer.from_pretrained("Abdulmohsena/Faseeh")
13
+ model = AutoModelForSeq2SeqLM.from_pretrained("Abdulmohsena/Faseeh").to(device)
14
+
15
def translate(text, temperature=0.1, tries=1):
    """Translate English text to Arabic with the Faseeh seq2seq model.

    Args:
        text: Source English string (or list of strings) to translate.
        temperature: Sampling temperature; the low default keeps sampled
            output close to greedy decoding.
        tries: Number of candidate translations to sample per input.

    Returns:
        The translated string when ``tries == 1`` (the default), otherwise
        the list of candidate translations.
    """
    # Tokenize and move tensors to the same device as the model;
    # truncate long inputs to the model's 256-token budget.
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=256).to(device)
    # Inference only: no_grad avoids building an autograd graph,
    # cutting memory use during generation.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            do_sample=True,
            temperature=temperature,
            num_return_sequences=tries,
        )
    candidates = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    # The Gradio Textbox output is wired to this return value and expects a
    # plain string; returning the raw list rendered as "['...']" in the UI.
    return candidates[0] if tries == 1 else candidates
26
 
27
  # Gradio interface
28
  with gr.Blocks() as demo:
 
38
  translate_btn = gr.Button("Translate")
39
 
40
  # Button action
41
+ translate_btn.click(translate, inputs=input_text, outputs=output_text)
42
 
43
  # Launch the Gradio app
44
  if __name__ == "__main__":