Santhosh54321 committed on
Commit
8538d63
·
verified ·
1 Parent(s): 3695c8a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -62
app.py CHANGED
@@ -1,4 +1,3 @@
1
- import os
2
  import requests
3
  import streamlit as st
4
  from PIL import Image
@@ -23,41 +22,48 @@ groq_headers = {
23
 
24
  # Function to query Hugging Face model for image generation
25
  def query_huggingface(payload):
26
- response = requests.post(HF_API_URL, headers=hf_headers, json=payload)
27
- if response.status_code != 200:
28
- st.error(f"Error: {response.status_code} - {response.text}")
 
 
 
29
  return None
30
- return response.content
31
 
32
  # Function to generate text using Groq API
33
  def generate_response(prompt):
34
- payload = {
35
- "model": "mixtral-8x7b-32768",
36
- "messages": [
37
- {"role": "system", "content": "You are a helpful assistant."},
38
- {"role": "user", "content": prompt}
39
- ],
40
- "max_tokens": 100,
41
- "temperature": 0.7
42
- }
43
- response = requests.post(groq_url, json=payload, headers=groq_headers)
44
- if response.status_code == 200:
45
- result = response.json()
46
- return result['choices'][0]['message']['content']
47
- else:
48
- st.error(f"Error: {response.status_code} - {response.text}")
 
 
49
  return None
50
 
51
  # Function to translate Tamil to English using MBart model
52
  def translate_tamil_to_english(tamil_text):
53
- model_name = "facebook/mbart-large-50-many-to-one-mmt"
54
- model = MBartForConditionalGeneration.from_pretrained(model_name)
55
- tokenizer = MBart50Tokenizer.from_pretrained(model_name, src_lang="ta_IN")
56
-
57
- inputs = tokenizer(tamil_text, return_tensors="pt")
58
- translated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
59
- translated_text = tokenizer.decode(translated[0], skip_special_tokens=True)
60
- return translated_text
 
 
 
61
 
62
  # Main function to generate text and image
63
  def generate_image_and_text(user_input):
@@ -67,40 +73,36 @@ def generate_image_and_text(user_input):
67
  # Translate Tamil to English
68
  english_input = translate_tamil_to_english(user_input)
69
  if not english_input:
70
- st.error("Error in translation.")
71
- return
72
-
73
- # Generate text description (100 tokens) and image prompt (30 tokens) using Groq API
74
- full_text_description = generate_response(english_input)
75
- if not full_text_description:
76
- st.error("Error in text generation.")
77
- return
78
-
79
- # Create image prompt based on the full text description
80
- image_prompt = generate_response(f"Create a concise image prompt from the following text: {full_text_description}")
81
- if not image_prompt:
82
- st.error("Error in generating image prompt.")
83
- return
84
-
85
- # Request an image based on the generated image prompt
86
- image_data = query_huggingface({"inputs": image_prompt})
87
- if not image_data:
88
- st.error("Error in image generation.")
89
- return
90
-
91
- # Display the results
92
- st.markdown("### Translated English Text:")
93
- st.write(english_input)
94
-
95
- st.markdown("### Generated Text Response:")
96
- st.write(full_text_description)
97
-
98
- try:
99
- # Load and display the image
100
- image = Image.open(BytesIO(image_data))
101
- st.image(image, caption="Generated Image", use_column_width=True)
102
- except Exception as e:
103
- st.error(f"Failed to display image: {e}")
104
 
105
  # Streamlit interface
106
  st.title("Multi-Modal Generator (Tamil to English)")
 
 
1
  import requests
2
  import streamlit as st
3
  from PIL import Image
 
22
 
23
# Function to query Hugging Face model for image generation
def query_huggingface(payload):
    """POST *payload* to the Hugging Face inference endpoint and return the raw response bytes.

    Returns the response body (image bytes) on HTTP 200, or None on any
    non-200 status or network failure. Best-effort by design: the caller
    (`generate_image_and_text`) turns None into a user-facing warning
    instead of crashing the Streamlit app.
    """
    # NOTE(review): HF_API_URL and hf_headers are module-level globals defined
    # earlier in the file (outside this chunk).
    try:
        response = requests.post(HF_API_URL, headers=hf_headers, json=payload)
        if response.status_code != 200:
            return None
        return response.content
    except requests.exceptions.RequestException:
        # Narrowed from a bare `except Exception`: only network/HTTP transport
        # errors are swallowed, so genuine programming bugs still surface.
        return None
 
32
 
33
# Function to generate text using Groq API
def generate_response(prompt):
    """Ask the Groq chat-completion API for a short (max 100 tokens) reply to *prompt*.

    Returns the assistant message text on success, or None on any network
    failure, non-200 status, malformed JSON, or unexpected response shape.
    Best-effort by design: callers turn None into a user-facing warning.
    """
    # Building the payload cannot raise, so keep it outside the try block.
    payload = {
        "model": "mixtral-8x7b-32768",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt}
        ],
        "max_tokens": 100,
        "temperature": 0.7
    }
    # NOTE(review): groq_url and groq_headers are module-level globals defined
    # earlier in the file (outside this chunk).
    try:
        response = requests.post(groq_url, json=payload, headers=groq_headers)
        if response.status_code != 200:
            return None
        result = response.json()
        return result['choices'][0]['message']['content']
    except (requests.exceptions.RequestException, ValueError, KeyError, IndexError, TypeError):
        # Narrowed from a bare `except Exception`: transport errors, invalid
        # JSON (ValueError), and an unexpected response shape are the expected
        # failure modes; anything else is a bug and should surface.
        return None
53
 
54
# Function to translate Tamil to English using MBart model
def translate_tamil_to_english(tamil_text):
    """Translate *tamil_text* (Tamil) to English with mbart-large-50-many-to-one-mmt.

    The model and tokenizer are loaded lazily on the first call and cached on
    the function object — the original re-instantiated both on every call,
    which re-reads multi-GB weights each time and dominates latency.
    Returns the English translation, or None if loading or generation fails
    (best-effort: the caller shows a warning instead of crashing the app).
    """
    try:
        if not hasattr(translate_tamil_to_english, "_cache"):
            model_name = "facebook/mbart-large-50-many-to-one-mmt"
            model = MBartForConditionalGeneration.from_pretrained(model_name)
            tokenizer = MBart50Tokenizer.from_pretrained(model_name, src_lang="ta_IN")
            translate_tamil_to_english._cache = (model, tokenizer)
        model, tokenizer = translate_tamil_to_english._cache

        inputs = tokenizer(tamil_text, return_tensors="pt")
        translated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
        return tokenizer.decode(translated[0], skip_special_tokens=True)
    except Exception:
        # Broad on purpose: model download, tokenization, and generation can
        # fail in many ways (network, disk, OOM); the app degrades to a warning.
        return None
67
 
68
  # Main function to generate text and image
69
  def generate_image_and_text(user_input):
 
73
  # Translate Tamil to English
74
  english_input = translate_tamil_to_english(user_input)
75
  if not english_input:
76
+ st.warning("Sorry, the translation model is unavailable right now πŸ˜₯πŸ˜₯πŸ˜₯. Please try again later.")
77
+ else:
78
+ st.markdown("### Translated English Text:")
79
+ st.write(english_input)
80
+
81
+ # Generate text description (100 tokens) using Groq API
82
+ if english_input:
83
+ full_text_description = generate_response(english_input)
84
+ if not full_text_description:
85
+ st.warning("Sorry, the text generation model is unavailable right now πŸ˜₯πŸ˜₯πŸ˜₯. Please try again later.")
86
+ else:
87
+ st.markdown("### Generated Text Response:")
88
+ st.write(full_text_description)
89
+
90
+ # Create image prompt based on the full text description
91
+ image_prompt = generate_response(f"Create a concise image prompt from the following text: {full_text_description}")
92
+ if not image_prompt:
93
+ st.warning("Sorry, the image prompt model is unavailable right now πŸ˜₯πŸ˜₯πŸ˜₯. Please try again later.")
94
+ else:
95
+ # Request an image based on the generated image prompt
96
+ image_data = query_huggingface({"inputs": image_prompt})
97
+ if not image_data:
98
+ st.warning("Sorry, the image generation model is unavailable right now πŸ˜₯πŸ˜₯πŸ˜₯. Please try again later.")
99
+ else:
100
+ try:
101
+ # Load and display the image
102
+ image = Image.open(BytesIO(image_data))
103
+ st.image(image, caption="Generated Image", use_column_width=True)
104
+ except Exception as e:
105
+ st.error(f"Failed to display image: {e}")
 
 
 
 
106
 
107
  # Streamlit interface
108
  st.title("Multi-Modal Generator (Tamil to English)")