bstraehle committed
Commit 57e6710 · Parent(s): 0266040

Update app.py

Files changed (1): app.py (+13 -12)
app.py CHANGED
@@ -24,18 +24,19 @@ QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], temp
 
 def invoke(openai_api_key, youtube_url, prompt):
     openai.api_key = openai_api_key
-    youtube_dir = "docs/youtube/"
-    loader = GenericLoader(YoutubeAudioLoader([youtube_url], youtube_dir), OpenAIWhisperParser())
-    docs = loader.load()
-    text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
-    splits = text_splitter.split_documents(docs)
-    chroma_dir = "docs/chroma/"
-    vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = chroma_dir)
-    llm = ChatOpenAI(model_name = "gpt-4", temperature = 0)
-    qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
+    if vectordb == None:
+        youtube_dir = "docs/youtube/"
+        loader = GenericLoader(YoutubeAudioLoader([youtube_url], youtube_dir), OpenAIWhisperParser())
+        docs = loader.load()
+        text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
+        splits = text_splitter.split_documents(docs)
+        chroma_dir = "docs/chroma/"
+        vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = chroma_dir)
+        llm = ChatOpenAI(model_name = "gpt-4", temperature = 0)
+        qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
     result = qa_chain({"query": prompt})
-    shutil.rmtree(youtube_dir)
-    shutil.rmtree(chroma_dir)
+    #shutil.rmtree(youtube_dir)
+    #shutil.rmtree(chroma_dir)
     return result["result"]
 
 description = """The app demonstrates how to use a <strong>Large Language Model</strong> (LLM) with <strong>Retrieval Augmented Generation</strong> (RAG) on external data.
@@ -47,7 +48,7 @@ description = """The app demonstrates how to use a <strong>Large Language Model<
 
 gr.close_all()
 demo = gr.Interface(fn=invoke,
-                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1), gr.Textbox(label = "Prompt", value = "What is GPT-4?", lines = 1)],
+                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1), gr.Textbox(label = "Prompt", value = "GPT-4 human level performance", lines = 1)],
                     outputs = [gr.Textbox(label = "Completion", lines = 1)],
                     title = "Generative AI - LLM & RAG",
                     description = description)
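
A note on the committed code: inside invoke, vectordb and qa_chain are assigned, so Python treats both names as locals, and the new "if vectordb == None:" test reads vectordb before any assignment. Without a module-level cache and a global declaration, the first call raises UnboundLocalError, and a cache-hit call would leave qa_chain undefined as well. Below is a minimal sketch of the caching pattern this commit appears to be aiming for. The import paths and the placeholder prompt template are assumptions (the diff truncates the real QA_CHAIN_PROMPT and does not show the imports); everything else follows the diff.

    # Caching sketch. Assumption: classic langchain 0.x import paths,
    # which this Space appears to date from.
    import openai
    from langchain.chains import RetrievalQA
    from langchain.chat_models import ChatOpenAI
    from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
    from langchain.document_loaders.generic import GenericLoader
    from langchain.document_loaders.parsers import OpenAIWhisperParser
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.prompts import PromptTemplate
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.vectorstores import Chroma

    # Stand-in template: the real QA_CHAIN_PROMPT is truncated in the diff.
    QA_CHAIN_PROMPT = PromptTemplate(
        input_variables = ["context", "question"],
        template = "Use the context to answer the question.\n{context}\nQuestion: {question}")

    # Module-level cache so the expensive download/transcribe/embed work runs once.
    qa_chain = None

    def invoke(openai_api_key, youtube_url, prompt):
        global qa_chain
        openai.api_key = openai_api_key
        if qa_chain is None:  # "is None" rather than "== None" (PEP 8)
            youtube_dir = "docs/youtube/"
            # Download the video's audio track and transcribe it with Whisper.
            loader = GenericLoader(YoutubeAudioLoader([youtube_url], youtube_dir), OpenAIWhisperParser())
            docs = loader.load()
            # Split the transcript into overlapping chunks for retrieval.
            text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
            splits = text_splitter.split_documents(docs)
            # Embed the chunks and persist them in a Chroma vector store.
            vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(),
                                             persist_directory = "docs/chroma/")
            llm = ChatOpenAI(model_name = "gpt-4", temperature = 0)
            qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(),
                                                   return_source_documents = True,
                                                   chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
        result = qa_chain({"query": prompt})
        return result["result"]

Caching on qa_chain (rather than vectordb) also covers the repeat-call path, where the committed version would hit an undefined qa_chain. One side effect matches the commented-out shutil.rmtree calls: the first YouTube URL wins, and later calls with a different URL are still answered from the original transcript.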