Update app.py
app.py CHANGED
@@ -24,18 +24,19 @@ QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], temp
 
 def invoke(openai_api_key, youtube_url, prompt):
     openai.api_key = openai_api_key
-
-
-
-
-
-
-
-
-
+    if vectordb == None:
+        youtube_dir = "docs/youtube/"
+        loader = GenericLoader(YoutubeAudioLoader([youtube_url], youtube_dir), OpenAIWhisperParser())
+        docs = loader.load()
+        text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
+        splits = text_splitter.split_documents(docs)
+        chroma_dir = "docs/chroma/"
+        vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = chroma_dir)
+    llm = ChatOpenAI(model_name = "gpt-4", temperature = 0)
+    qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
     result = qa_chain({"query": prompt})
-    shutil.rmtree(youtube_dir)
-    shutil.rmtree(chroma_dir)
+    #shutil.rmtree(youtube_dir)
+    #shutil.rmtree(chroma_dir)
     return result["result"]
 
 description = """The app demonstrates how to use a <strong>Large Language Model</strong> (LLM) with <strong>Retrieval Augmented Generation</strong> (RAG) on external data.
@@ -47,7 +48,7 @@ description = """The app demonstrates how to use a <strong>Large Language Model<
 
 gr.close_all()
 demo = gr.Interface(fn=invoke,
-    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1), gr.Textbox(label = "Prompt", value = "
+    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1), gr.Textbox(label = "Prompt", value = "GPT-4 human level performance", lines = 1)],
     outputs = [gr.Textbox(label = "Completion", lines = 1)],
     title = "Generative AI - LLM & RAG",
     description = description)
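
For context when reading the hunks: the diff only touches invoke() and the gr.Interface(...) inputs. The module-level scaffolding that invoke() relies on (the imports, the QA_CHAIN_PROMPT whose definition is truncated in the hunk header, the vectordb cache variable, and the final launch) sits outside the changed range. Below is a minimal sketch of what that scaffolding typically looks like, assuming the pre-0.1 LangChain import layout and a placeholder prompt template; neither is taken from this commit.

# Sketch only: module-level setup assumed by invoke() above, not part of this commit.
import gradio as gr
import openai
import shutil

from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

# Placeholder wording; the real template string is truncated in the hunk header.
template = """Use the following pieces of context to answer the question at the end.
{context}
Question: {question}
Helpful Answer:"""
QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)

# Cache checked by invoke(); stays None until the first request builds the Chroma index.
vectordb = None

# ... invoke() and the gr.Interface(...) block from the diff go here ...

demo.launch()

On Spaces, this stack also typically assumes the matching packages (gradio, openai, langchain, chromadb, yt_dlp, pydub) are listed in requirements.txt so the audio loader and vector store can import.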
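
One caveat about the caching pattern introduced here: invoke() assigns to vectordb, so Python treats the name as local to the function, and the "if vectordb == None:" guard cannot read the module-level value without a global declaration; as far as the hunk shows, the first call would likely fail with UnboundLocalError. A small, self-contained illustration of the pattern with the declaration in place, using hypothetical names rather than the app's:

_index = None   # module-level cache, analogous to vectordb in app.py

def get_index():
    global _index                   # required: the assignment below would otherwise make _index local
    if _index is None:              # build only on the first call in this process
        _index = {"built": True}    # stand-in for the expensive Chroma.from_documents(...) build
    return _index

print(get_index() is get_index())   # True: the second call reuses the cached object

Reusing the index also lines up with the commented-out shutil.rmtree(...) lines in the hunk: the downloaded audio and the persisted Chroma directory are no longer deleted after each request.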