gokilashree committed on
Commit 3b844d5 · verified · 1 Parent(s): c24784b

Update app.py

Files changed (1)
app.py +35 -9
app.py CHANGED
@@ -1,27 +1,53 @@
+ # Import required libraries
  from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
  from transformers import MBartForConditionalGeneration, MBart50Tokenizer
  import gradio as gr
  import requests
  import io
  from PIL import Image
+ import os  # Import os to access environment variables

+ # Install necessary packages (Google Colab-specific; ignore if running in another environment)
  !pip install torch --upgrade
+ !pip install gradio
  !pip install transformers
- !pip install sentencepiece
- !pip install huggingface_hub
- !pip install gradio --upgrade
- !pip install diffusers
  !pip install pillow

-
- # Load Facebook's mbart-large-50 model and tokenizer
+ # Load the models and tokenizers
  model_name = "facebook/mbart-large-50-many-to-one-mmt"
- … fn=translate_and_generate_image,
+ tokenizer = MBart50Tokenizer.from_pretrained(model_name)
+ model = MBartForConditionalGeneration.from_pretrained(model_name)
+
+ # Use the Hugging Face API key from environment variables
+ API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"
+ headers = {"Authorization": f"Bearer {os.getenv('HF_API_KEY')}"}
+
+ # Define the function to translate Tamil text and generate an image
+ def translate_and_generate_image(tamil_text):
+     # Step 1: Translate Tamil text to English
+     tokenizer.src_lang = "ta_IN"
+     inputs = tokenizer(tamil_text, return_tensors="pt")
+     translated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
+     translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
+
+     # Step 2: Use the translated English text to generate an image
+     def query(payload):
+         response = requests.post(API_URL, headers=headers, json=payload)
+         return response.content
+
+     image_bytes = query({"inputs": translated_text})
+     image = Image.open(io.BytesIO(image_bytes))
+
+     return translated_text, image
+
+ # Gradio interface setup
+ iface = gr.Interface(
+     fn=translate_and_generate_image,
      inputs=gr.Textbox(lines=2, placeholder="Enter Tamil text here..."),
      outputs=[gr.Textbox(label="Translated English Text"), gr.Image(label="Generated Image")],
      title="Tamil to English Translation and Image Generation",
      description="Translate Tamil text to English using Facebook's mbart-large-50 model and generate an image using the translated text as the prompt.",
  )

- # Launch Gradio app with shareable link
- iface.launch(share=True)
+ # Launch Gradio app with a shareable link
+ iface.launch(share=True)
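
For anyone who wants to exercise the new HF_API_KEY-based image-generation path outside the Gradio UI, here is a minimal sketch. It reuses the API_URL endpoint and HF_API_KEY variable named in the diff; the query_image helper, the raise_for_status() check, and the sample prompt are illustrative assumptions, not part of this commit.

# Sketch only: assumes HF_API_KEY is already exported in the environment
# (e.g. `export HF_API_KEY=hf_...`) or configured as a Space secret.
import io
import os

import requests
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/ZB-Tech/Text-to-Image"  # same endpoint as in app.py

token = os.getenv("HF_API_KEY")
if not token:
    raise RuntimeError("HF_API_KEY is not set; the Inference API request would be rejected.")

headers = {"Authorization": f"Bearer {token}"}

def query_image(prompt):
    # POST an (already translated) English prompt and decode the response bytes as an image.
    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
    response.raise_for_status()  # illustrative: fail loudly instead of handing error bytes to PIL
    return Image.open(io.BytesIO(response.content))

# Hypothetical prompt standing in for the output of the translation step in translate_and_generate_image.
query_image("a sunrise over a village temple").save("generated.png")

In app.py itself the raw bytes are passed straight to PIL; the explicit status check here only makes failures (for example, a missing or invalid token) easier to spot while testing.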