Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -4,14 +4,22 @@ import re
|
|
4 |
from dotenv import load_dotenv
|
5 |
import os
|
6 |
from groq import Groq
|
|
|
|
|
|
|
|
|
7 |
|
8 |
-
# Load the API
|
9 |
load_dotenv()
|
10 |
-
|
11 |
-
|
12 |
|
13 |
-
|
14 |
-
|
|
|
|
|
|
|
|
|
15 |
|
16 |
def get_transcript(url):
|
17 |
try:
|
@@ -26,17 +34,41 @@ def get_transcript(url):
|
|
26 |
except Exception as e:
|
27 |
return f"Error: {str(e)}"
|
28 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
29 |
def answer_question(transcript, question):
|
30 |
try:
|
31 |
-
response =
|
32 |
model="mixtral-8x7b-32768",
|
33 |
messages=[
|
34 |
{"role": "system", "content": "You are a helpful assistant."},
|
35 |
-
{"role": "user", "content": f"Using the following transcript as context
|
36 |
],
|
37 |
-
max_tokens=
|
38 |
)
|
39 |
-
|
40 |
if response.choices and response.choices[0].message:
|
41 |
answer = response.choices[0].message.content.strip()
|
42 |
return answer
|
@@ -45,24 +77,41 @@ def answer_question(transcript, question):
|
|
45 |
except Exception as e:
|
46 |
return f"Error in answering question: {str(e)}"
|
47 |
|
48 |
-
def handle_query(
|
49 |
-
transcript = get_transcript(youtube_url)
|
50 |
if "Error" in transcript:
|
51 |
return transcript
|
52 |
answer = answer_question(transcript, question)
|
53 |
return answer
|
54 |
|
55 |
-
st.title("
|
56 |
-
youtube_url = st.text_input("YouTube URL", placeholder="Enter YouTube URL here...")
|
57 |
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
st.write("
|
68 |
-
st.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
from dotenv import load_dotenv
import os
from groq import Groq
from openai import OpenAI
import tempfile
from pydub import AudioSegment
import io

# Load the API keys from .env file
load_dotenv()
groq_api_key = os.getenv('GROQ_API_KEY')
openai_api_key = os.getenv('OPENAI_API_KEY')

# Fail fast: validate BEFORE constructing the clients. Previously the clients
# were built first, so a missing key could surface later as an opaque SDK
# error instead of this explicit message.
if not groq_api_key or not openai_api_key:
    raise ValueError("API keys are not set. Please check your .env file and ensure GROQ_API_KEY and OPENAI_API_KEY are set.")

# Initialize clients
groq_client = Groq(api_key=groq_api_key)
openai_client = OpenAI(api_key=openai_api_key)
|
23 |
|
24 |
def get_transcript(url):
|
25 |
try:
|
|
|
34 |
except Exception as e:
|
35 |
return f"Error: {str(e)}"
|
36 |
|
37 |
+
def convert_to_supported_format(file):
    """Re-encode an uploaded audio/video file to MP3 in memory.

    Returns an ``io.BytesIO`` buffer, rewound to position 0, containing the
    MP3-encoded audio — ready to be written to disk or uploaded.
    """
    mp3_buffer = io.BytesIO()
    # pydub sniffs the container format itself, so any input ffmpeg
    # understands (mp3/mp4/wav, ...) is accepted here.
    AudioSegment.from_file(file).export(mp3_buffer, format="mp3")
    mp3_buffer.seek(0)
    return mp3_buffer
43 |
+
|
44 |
+
def transcribe_audio(file):
    """Transcribe an uploaded audio/video file with OpenAI Whisper.

    The file is first re-encoded to MP3 (via convert_to_supported_format),
    written to a temporary file, and sent to the ``whisper-1`` model.

    Returns:
        The transcript text, or a string starting with
        ``"Error in transcription:"`` on any failure — callers check for the
        substring ``"Error"`` rather than catching exceptions.
    """
    # Conversion can fail on corrupt or unsupported uploads; previously this
    # raised out of the function and crashed the Streamlit script instead of
    # returning the error string the callers expect.
    try:
        converted = convert_to_supported_format(file)
    except Exception as e:
        return f"Error in transcription: {str(e)}"

    # Whisper upload needs a real file on disk; delete=False so the handle
    # can be reopened below, with explicit cleanup in the finally block.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_file:
        temp_file.write(converted.getvalue())
        temp_file_path = temp_file.name

    try:
        with open(temp_file_path, "rb") as audio_file:
            transcript = openai_client.audio.transcriptions.create(
                model="whisper-1",
                file=audio_file
            )
        return transcript.text
    except Exception as e:
        return f"Error in transcription: {str(e)}"
    finally:
        os.remove(temp_file_path)
|
61 |
+
|
62 |
def answer_question(transcript, question):
|
63 |
try:
|
64 |
+
response = groq_client.chat.completions.create(
|
65 |
model="mixtral-8x7b-32768",
|
66 |
messages=[
|
67 |
{"role": "system", "content": "You are a helpful assistant."},
|
68 |
+
{"role": "user", "content": f"Using the following transcript as context, please answer the question:\n\nTranscript:\n{transcript}\n\nQuestion:\n{question}"}
|
69 |
],
|
70 |
+
max_tokens=150
|
71 |
)
|
|
|
72 |
if response.choices and response.choices[0].message:
|
73 |
answer = response.choices[0].message.content.strip()
|
74 |
return answer
|
|
|
77 |
except Exception as e:
|
78 |
return f"Error in answering question: {str(e)}"
|
79 |
|
80 |
+
def handle_query(transcript, question):
    """Answer *question* using *transcript* as context.

    If *transcript* is actually an error string from a failed fetch or
    transcription (contains "Error"), it is returned unchanged so the UI can
    display it.
    """
    # Pass transcription failures straight through to the caller.
    if "Error" in transcript:
        return transcript
    return answer_question(transcript, question)
|
85 |
|
86 |
+
st.title("Video/Audio Doubt Bot")

option = st.selectbox("Choose input type", ("YouTube URL", "Upload audio/video file"))


def _question_and_answer(transcript, prompt):
    # Shared question/answer widgets used by both input modes; only the
    # text-input prompt differs between them.
    question = st.text_input(prompt)
    if st.button("Get Answer"):
        answer = handle_query(transcript, question)
        st.write("### Answer")
        st.write(answer)


if option == "YouTube URL":
    url = st.text_input("YouTube URL", placeholder="Enter YouTube URL here...")
    if url:
        transcript = get_transcript(url)
        if "Error" in transcript:
            st.write(transcript)
        else:
            st.write("Transcript successfully loaded.")
            _question_and_answer(transcript, "Ask a question about the video")

elif option == "Upload audio/video file":
    uploaded = st.file_uploader("Choose an audio or video file", type=["mp3", "mp4", "wav"])
    if uploaded is not None:
        with st.spinner('Transcribing audio...'):
            transcript = transcribe_audio(uploaded)
        if "Error" in transcript:
            st.write(transcript)
        else:
            st.write("File successfully transcribed.")
            _question_and_answer(transcript, "Ask a question about the audio/video")
|