Update app.py
app.py CHANGED
@@ -1,4 +1,3 @@
-import os
 import requests
 import streamlit as st
 from PIL import Image
@@ -23,41 +22,48 @@ groq_headers = {
 
 # Function to query Hugging Face model for image generation
 def query_huggingface(payload):
+    try:
+        response = requests.post(HF_API_URL, headers=hf_headers, json=payload)
+        if response.status_code != 200:
+            return None
+        return response.content
+    except Exception:
         return None
 
 # Function to generate text using Groq API
 def generate_response(prompt):
+    try:
+        payload = {
+            "model": "mixtral-8x7b-32768",
+            "messages": [
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": prompt}
+            ],
+            "max_tokens": 100,
+            "temperature": 0.7
+        }
+        response = requests.post(groq_url, json=payload, headers=groq_headers)
+        if response.status_code == 200:
+            result = response.json()
+            return result['choices'][0]['message']['content']
+        else:
+            return None
+    except Exception:
         return None
 
 # Function to translate Tamil to English using MBart model
 def translate_tamil_to_english(tamil_text):
+    try:
+        model_name = "facebook/mbart-large-50-many-to-one-mmt"
+        model = MBartForConditionalGeneration.from_pretrained(model_name)
+        tokenizer = MBart50Tokenizer.from_pretrained(model_name, src_lang="ta_IN")
+
+        inputs = tokenizer(tamil_text, return_tensors="pt")
+        translated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
+        translated_text = tokenizer.decode(translated[0], skip_special_tokens=True)
+        return translated_text
+    except Exception:
+        return None
 
 # Main function to generate text and image
 def generate_image_and_text(user_input):
@@ -67,40 +73,36 @@ def generate_image_and_text(user_input):
     # Translate Tamil to English
     english_input = translate_tamil_to_english(user_input)
     if not english_input:
+        st.warning("Sorry, the translation model is unavailable right now 😥😥😥. Please try again later.")
+    else:
+        st.markdown("### Translated English Text:")
+        st.write(english_input)
+
+    # Generate text description (100 tokens) using Groq API
+    if english_input:
+        full_text_description = generate_response(english_input)
+        if not full_text_description:
+            st.warning("Sorry, the text generation model is unavailable right now 😥😥😥. Please try again later.")
+        else:
+            st.markdown("### Generated Text Response:")
+            st.write(full_text_description)
+
+            # Create image prompt based on the full text description
+            image_prompt = generate_response(f"Create a concise image prompt from the following text: {full_text_description}")
+            if not image_prompt:
+                st.warning("Sorry, the image prompt model is unavailable right now 😥😥😥. Please try again later.")
+            else:
+                # Request an image based on the generated image prompt
+                image_data = query_huggingface({"inputs": image_prompt})
+                if not image_data:
+                    st.warning("Sorry, the image generation model is unavailable right now 😥😥😥. Please try again later.")
+                else:
+                    try:
+                        # Load and display the image
+                        image = Image.open(BytesIO(image_data))
+                        st.image(image, caption="Generated Image", use_column_width=True)
+                    except Exception as e:
+                        st.error(f"Failed to display image: {e}")
 
 # Streamlit interface
 st.title("Multi-Modal Generator (Tamil to English)")
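For context, the changed functions reference names defined in parts of app.py this commit does not touch: the second hunk header shows `groq_headers = {` just above it, and `HF_API_URL`, `hf_headers`, `groq_url`, `BytesIO`, and the MBart classes must come from the unchanged lines 4-21 and the unchanged tail below `st.title(...)`. The following is only a minimal sketch of what those lines presumably contain; the Groq endpoint value, the `<...>` placeholders, the image model id, and the text-input/button wiring are assumptions, not the repository's actual code.

# Sketch of the unchanged preamble this diff assumes (not part of the commit).
from io import BytesIO  # required for Image.open(BytesIO(image_data))
from transformers import MBartForConditionalGeneration, MBart50Tokenizer

# Hugging Face Inference API endpoint; the real image model id is not shown in the diff
HF_API_URL = "https://api-inference.huggingface.co/models/<image-model-id>"
hf_headers = {"Authorization": "Bearer <HF_API_TOKEN>"}

# Groq's OpenAI-compatible chat completions endpoint (assumed value of groq_url)
groq_url = "https://api.groq.com/openai/v1/chat/completions"
groq_headers = {
    "Authorization": "Bearer <GROQ_API_KEY>",
    "Content-Type": "application/json",
}

# Presumed wiring in the unchanged tail, below st.title(...):
user_input = st.text_input("Enter Tamil text:")
if st.button("Generate"):
    generate_image_and_text(user_input)

One design note on the new code: translate_tamil_to_english re-downloads and re-instantiates the MBart model and tokenizer on every call; wrapping the loading step in a function decorated with st.cache_resource would avoid that, at the cost of keeping the model in memory for the life of the app.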