Update app.py
app.py
CHANGED
@@ -3,13 +3,20 @@ import requests
 import os
 
 # Fetch Hugging Face and Groq API keys from secrets
-
-
+Transalate_token = os.getenv('HUGGINGFACE_TOKEN')
+Image_Token = os.getenv('HUGGINGFACE_TOKEN')
+Content_Token = os.getenv('GROQ_API_KEY')
+Image_prompt_token = os.getenv('GROQ_API_KEY')
 
 # API Headers
-
-
-
+Translate = {"Authorization": f"Bearer {Transalate_token}"}
+Image_generation = {"Authorization": f"Bearer {Image_Token}"}
+Content_generation = {
+    "Authorization": f"Bearer {Content_Token}",
+    "Content-Type": "application/json"
+}
+Image_Prompt = {
+    "Authorization": f"Bearer {Image_prompt_token}",
     "Content-Type": "application/json"
 }
 
@@ -22,13 +29,14 @@ image_generation_url = "https://api-inference.huggingface.co/models/black-forest
 # Function to query Hugging Face translation model
 def translate_text(text):
     payload = {"inputs": text}
-    response = requests.post(translation_url, headers=
+    response = requests.post(translation_url, headers=Translate, json=payload)
     if response.status_code == 200:
         result = response.json()
         translated_text = result[0]['generated_text']
         return translated_text
     else:
         st.error(f"Translation Error {response.status_code}: {response.text}")
+        st.write('Please try again or provide an English input 😥')
         return None
 
 # Function to query Groq content generation model
@@ -43,7 +51,7 @@ def generate_content(english_text, max_tokens, temperature):
         "max_tokens": max_tokens,
         "temperature": temperature
     }
-    response = requests.post(url, json=payload, headers=
+    response = requests.post(url, json=payload, headers=Content_generation)
     if response.status_code == 200:
         result = response.json()
         return result['choices'][0]['message']['content']
@@ -61,7 +69,7 @@ def generate_image_prompt(english_text):
         ],
         "max_tokens": 30
     }
-    response = requests.post("https://api.groq.com/openai/v1/chat/completions", json=payload, headers=
+    response = requests.post("https://api.groq.com/openai/v1/chat/completions", json=payload, headers=Image_Prompt)
     if response.status_code == 200:
         result = response.json()
         return result['choices'][0]['message']['content']
@@ -72,7 +80,7 @@ def generate_image_prompt(english_text):
 # Function to generate an image from the prompt
 def generate_image(image_prompt):
     data = {"inputs": image_prompt}
-    response = requests.post(image_generation_url, headers=
+    response = requests.post(image_generation_url, headers=Image_generation, json=data)
     if response.status_code == 200:
         return response.content
     else:
@@ -81,8 +89,37 @@ def generate_image(image_prompt):
 
 # Main Streamlit app
 def main():
-
-
+    # Custom CSS for background, borders, and other styling
+    st.markdown(
+        """
+        <style>
+        body {
+            background-image: url('https://wallpapercave.com/wp/wp4008910.jpg');
+            background-size: cover;
+        }
+        .reportview-container {
+            background: rgba(255, 255, 255, 0.85);
+            padding: 2rem;
+            border-radius: 10px;
+            box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.1);
+        }
+        .result-container {
+            border: 2px solid #4CAF50;
+            padding: 20px;
+            border-radius: 10px;
+            margin-top: 20px;
+            animation: fadeIn 2s ease;
+        }
+        @keyframes fadeIn {
+            0% { opacity: 0; }
+            100% { opacity: 1; }
+        }
+        </style>
+        """, unsafe_allow_html=True
+    )
+
+    st.title("🌐 Multimodal Generator")
+
     # Sidebar for temperature and token adjustment
     st.sidebar.header("Settings")
     temperature = st.sidebar.slider("Select Temperature", 0.1, 1.0, 0.7)
@@ -101,6 +138,9 @@ def main():
     if tamil_input:
         st.write("### Translated English Text:")
         english_text = translate_text(tamil_input)
+        if not english_text:
+            english_text = st.text_input("Translation failed. Please enter English text instead:")
+
         if english_text:
             st.success(english_text)
 
@@ -121,4 +161,3 @@ def main():
 
 if __name__ == "__main__":
     main()
-
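For reference, a minimal standalone sketch (not part of the commit) of the secret/header wiring this change relies on. It assumes HUGGINGFACE_TOKEN and GROQ_API_KEY are configured as Space secrets (or exported locally), and uses a placeholder for the translation_url that app.py defines outside these hunks.

import os
import requests

# Placeholder for the translation_url defined in app.py (outside these hunks).
TRANSLATION_URL = "https://api-inference.huggingface.co/models/<translation-model>"

def check_secrets():
    # os.getenv returns None for a missing secret, which would turn the headers
    # into "Bearer None" and surface as 401 errors at request time.
    for name in ("HUGGINGFACE_TOKEN", "GROQ_API_KEY"):
        print(f"{name}: {'set' if os.getenv(name) else 'MISSING'}")

def try_translate(text):
    headers = {"Authorization": f"Bearer {os.getenv('HUGGINGFACE_TOKEN')}"}
    # Same request shape as translate_text(); the serverless Inference API can
    # also answer 503 while the model is still loading.
    resp = requests.post(TRANSLATION_URL, headers=headers, json={"inputs": text}, timeout=60)
    print(resp.status_code, resp.text[:200])

if __name__ == "__main__":
    check_secrets()
    try_translate("வணக்கம்")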
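Similarly, a hedged sketch of the Groq chat-completions pattern that generate_content() and generate_image_prompt() follow: the endpoint, headers, and response parsing mirror the diff above, while the model name is a placeholder assumption since the app's actual model choice sits outside these hunks.

import os
import requests

GROQ_URL = "https://api.groq.com/openai/v1/chat/completions"

def groq_chat(prompt, max_tokens=200, temperature=0.7):
    headers = {
        "Authorization": f"Bearer {os.getenv('GROQ_API_KEY')}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": "<groq-model-name>",  # placeholder; not taken from this commit
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    resp = requests.post(GROQ_URL, json=payload, headers=headers, timeout=60)
    if resp.status_code == 200:
        # Same response parsing as in the diff above.
        return resp.json()["choices"][0]["message"]["content"]
    return None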