Kishorekumar7 committed · verified
Commit 60b0b0c · 1 Parent(s): a5a0e07

Update app.py

Files changed (1)
  1. app.py +46 -36
app.py CHANGED
@@ -3,6 +3,8 @@ import torch
 import streamlit as st
 from groq import Groq
 from diffusers import AutoPipelineForText2Image
+import tempfile
+import soundfile as sf
 
 # Load API keys
 GROQ_API_KEY = os.getenv("GROQ_API_KEY")
@@ -11,21 +13,17 @@ HF_API_KEY = os.getenv("HF_API_KEY")
 # Initialize Groq client with API key
 client = Groq(api_key=GROQ_API_KEY)
 
-# Select device (GPU if available, else CPU)
-device = "cuda" if torch.cuda.is_available() else "cpu"
-st.write(f"Using device: {device}")  # Display device info
-
 # Load lightweight Hugging Face image generation model
 image_gen = AutoPipelineForText2Image.from_pretrained(
     "stabilityai/sdxl-turbo", use_auth_token=HF_API_KEY
 )
-image_gen.to(device)
+image_gen.to("cuda" if torch.cuda.is_available() else "cpu")
 
 # Function to transcribe Tamil audio using Groq's Whisper
-def transcribe(audio_file):
-    with open(audio_file, "rb") as file:
+def transcribe(audio_path):
+    with open(audio_path, "rb") as file:
         transcription = client.audio.transcriptions.create(
-            file=(audio_file, file.read()),
+            file=(audio_path, file.read()),
             model="whisper-large-v3",
             language="ta",  # Tamil
             response_format="verbose_json"
@@ -38,7 +36,7 @@ def translate_text(tamil_text):
         model="gemma-7b-it",
         messages=[{"role": "user", "content": f"Translate this Tamil text to English: {tamil_text}"}]
     )
-    return response.choices[0].message.content
+    return response.choices[0].delta.content
 
 # Function to generate text using Groq's DeepSeek R1
 def generate_text(prompt):
@@ -46,7 +44,7 @@ def generate_text(prompt):
         model="deepseek-coder-r1-7b",
         messages=[{"role": "user", "content": f"Write a short story about: {prompt}"}]
    )
-    return response.choices[0].message.content
+    return response.choices[0].delta.content
 
 # Function to generate an image
 def generate_image(prompt):
@@ -56,29 +54,41 @@ def generate_image(prompt):
 # Streamlit UI
 st.title("Tamil Speech to Image & Story Generator")
 
-# File uploader for audio
-uploaded_audio = st.file_uploader("Upload your Tamil speech", type=["wav", "mp3", "m4a"])
-
-if uploaded_audio is not None:
-    st.audio(uploaded_audio, format="audio/wav")
-
-    if st.button("Generate"):
-        with st.spinner("Transcribing..."):
-            tamil_text = transcribe(uploaded_audio)
-            st.success("Transcription complete!")
-            st.text_area("Tamil Text Output", tamil_text)
-
-        with st.spinner("Translating to English..."):
-            english_text = translate_text(tamil_text)
-            st.success("Translation complete!")
-            st.text_area("Translated English Text", english_text)
-
-        with st.spinner("Generating story..."):
-            story = generate_text(english_text)
-            st.success("Story generation complete!")
-            st.text_area("Generated Story", story)
-
-        with st.spinner("Generating image..."):
-            image = generate_image(english_text)
-            st.success("Image generation complete!")
-            st.image(image, caption="Generated Image")
+# Audio input - Recording or Uploading
+st.subheader("Upload or Record Audio")
+recorded_audio = st.audio("", format='audio/wav', start_time=0)
+uploaded_file = st.file_uploader("Upload an audio file", type=["wav", "mp3", "m4a"])
+
+audio_path = None
+
+if uploaded_file is not None:
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
+        temp_audio.write(uploaded_file.read())
+        audio_path = temp_audio.name
+elif recorded_audio:
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
+        audio_data, samplerate = sf.read(recorded_audio)
+        sf.write(temp_audio.name, audio_data, samplerate)
+        audio_path = temp_audio.name
+
+if st.button("Generate") and audio_path:
+    with st.spinner("Transcribing Tamil speech..."):
+        tamil_text = transcribe(audio_path)
+    with st.spinner("Translating to English..."):
+        english_text = translate_text(tamil_text)
+    with st.spinner("Generating story..."):
+        story = generate_text(english_text)
+    with st.spinner("Generating image..."):
+        image = generate_image(english_text)
+
+    st.subheader("Tamil Transcription")
+    st.write(tamil_text)
+
+    st.subheader("English Translation")
+    st.write(english_text)
+
+    st.subheader("Generated Story")
+    st.write(story)
+
+    st.subheader("Generated Image")
+    st.image(image)
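
A few notes for readers adapting this diff. First, the pipeline load: newer diffusers releases deprecate the use_auth_token argument of from_pretrained in favor of token. A minimal sketch of an equivalent load under that assumption (the fp16-on-GPU dtype choice is an illustration of mine, not part of the commit):

import os
import torch
from diffusers import AutoPipelineForText2Image

device = "cuda" if torch.cuda.is_available() else "cpu"
# `token` replaces the deprecated `use_auth_token` kwarg in recent diffusers.
image_gen = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo",
    token=os.getenv("HF_API_KEY"),
    # Half precision keeps sdxl-turbo's GPU memory footprint down;
    # full precision is the safe default on CPU.
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
image_gen.to(device)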
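
Second, the switch from response.choices[0].message.content to response.choices[0].delta.content in translate_text and generate_text: in OpenAI-compatible clients such as the Groq SDK, delta appears only on streaming chunks, while a non-streaming chat.completions.create call exposes the reply via message.content. A minimal sketch of the two access patterns (model id and prompts are illustrative):

import os
from groq import Groq

client = Groq(api_key=os.getenv("GROQ_API_KEY"))

# Non-streaming: a single response object; the text is on message.content.
response = client.chat.completions.create(
    model="llama-3.1-8b-instant",  # illustrative model id
    messages=[{"role": "user", "content": "Translate this Tamil text to English: வணக்கம்"}],
)
print(response.choices[0].message.content)

# Streaming: an iterator of chunks; each chunk carries delta.content.
stream = client.chat.completions.create(
    model="llama-3.1-8b-instant",
    messages=[{"role": "user", "content": "Write a short story about: a greeting"}],
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")  # final chunk may be None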
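
Finally, the recording path: st.audio is a playback widget, so st.audio("", ...) does not capture microphone input and the elif recorded_audio: branch never receives a recording. Recent Streamlit releases ship a dedicated recorder, st.audio_input, which returns an UploadedFile-like object. A sketch of how that branch might look, assuming Streamlit >= 1.40 (widget label and variable names are mine):

import tempfile
import streamlit as st

# st.audio_input renders a microphone recorder and returns the capture
# as a WAV UploadedFile, or None until the user records something.
recorded_audio = st.audio_input("Record your Tamil speech")

audio_path = None
if recorded_audio is not None:
    st.audio(recorded_audio)  # play back the capture for confirmation
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
        temp_audio.write(recorded_audio.read())
        audio_path = temp_audio.name

Because the widget already yields WAV bytes, this also sidesteps the soundfile read/write round-trip used in the diff.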