SyedHasanCronosPMC committed
Commit 40cedea · verified · 1 Parent(s): 33d410e

Update app.py

Files changed (1): app.py (+18 -20)
app.py CHANGED
@@ -1,5 +1,4 @@
 import os
-import tempfile
 import gradio as gr
 from langchain.document_loaders import PyPDFLoader, YoutubeLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
@@ -11,23 +10,21 @@ from langchain.chat_models import init_chat_model
 # --- API KEY HANDLING ---
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") or os.getenv("openai")
 if not OPENAI_API_KEY:
-    raise ValueError("❌ OPENAI API Key not found. Please add it to secrets as 'OPENAI_API_KEY' or 'openai'.")
+    raise ValueError("❌ OPENAI API Key not found. Please add it in Hugging Face secrets as 'OPENAI_API_KEY' or 'openai'.")
 
-# --- GRADIO PIPELINE FUNCTION ---
+# --- PROCESSING PIPELINE FUNCTION ---
 def process_inputs(pdf_file, youtube_url, query):
     docs = []
 
     # Load PDF
     try:
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp:
-            tmp.write(pdf_file.read())
-            pdf_path = tmp.name
+        pdf_path = pdf_file.name  # ✅ Use .name to get the actual file path from Gradio
         pdf_loader = PyPDFLoader(pdf_path)
         docs.extend(pdf_loader.load())
     except Exception as e:
         return f"❌ Failed to load PDF: {e}"
 
-    # Load YouTube transcript
+    # Load YouTube Transcript
     try:
         yt_loader = YoutubeLoader.from_youtube_url(youtube_url, add_video_info=False)
         docs.extend(yt_loader.load())
@@ -37,38 +34,39 @@ def process_inputs(pdf_file, youtube_url, query):
     if not docs:
         return "❌ No documents could be loaded from the PDF or YouTube URL."
 
-    # Split
+    # Split documents
     splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
     splits = splitter.split_documents(docs)
 
-    # Embed + Vectorstore
-    embedding = OpenAIEmbeddings(model="text-embedding-3-large", api_key=OPENAI_API_KEY)
-    db = FAISS.from_documents(splits, embedding)
+    # Embedding + Vector Store
+    try:
+        embedding = OpenAIEmbeddings(model="text-embedding-3-large", api_key=OPENAI_API_KEY)
+        db = FAISS.from_documents(splits, embedding)
+    except Exception as e:
+        return f"❌ Embedding failed: {e}"
 
     # QA Chain
-    llm = init_chat_model("gpt-4o-mini", model_provider="openai", api_key=OPENAI_API_KEY)
-    qa = RetrievalQA.from_chain_type(llm, retriever=db.as_retriever())
-
-    # Query
     try:
+        llm = init_chat_model("gpt-4o-mini", model_provider="openai", api_key=OPENAI_API_KEY)
+        qa = RetrievalQA.from_chain_type(llm, retriever=db.as_retriever())
         result = qa.invoke({"query": query})
         return result["result"]
     except Exception as e:
-        return f"❌ Error during retrieval: {e}"
+        return f"❌ Retrieval failed: {e}"
 
-# --- GRADIO UI ---
+# --- GRADIO APP ---
 with gr.Blocks() as demo:
     gr.Markdown("## 📚 Ask Questions from PDF + YouTube Transcript")
 
     with gr.Row():
         pdf_input = gr.File(label="Upload PDF", file_types=[".pdf"])
         yt_input = gr.Textbox(label="YouTube URL", placeholder="https://www.youtube.com/watch?v=...")
-
-    query_input = gr.Textbox(label="Your Question", placeholder="What did the video/PDF say about X?")
+
+    query_input = gr.Textbox(label="Your Question", placeholder="e.g., What did the PDF say about X?")
    output = gr.Textbox(label="Answer")
 
     run_button = gr.Button("Get Answer")
     run_button.click(fn=process_inputs, inputs=[pdf_input, yt_input, query_input], outputs=output)
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
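
A minimal local sketch (not part of this commit) of exercising the updated process_inputs outside the Gradio UI. It relies only on the change above, which reads the upload via pdf_file.name, so any object exposing a .name path attribute works; the sample PDF path, YouTube URL, and question are placeholders, and OPENAI_API_KEY is assumed to be set in the environment.

# Hypothetical manual check of process_inputs from app.py (this commit's version).
from app import process_inputs

# An open file handle exposes .name (its path on disk), matching what the
# updated code expects from Gradio's gr.File component.
with open("sample.pdf", "rb") as pdf_file:  # placeholder path to any local PDF
    answer = process_inputs(
        pdf_file,
        "https://www.youtube.com/watch?v=VIDEO_ID",  # placeholder YouTube URL
        "What did the PDF say about X?",
    )
print(answer)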