Build error
Update app.py
app.py CHANGED
@@ -45,6 +45,7 @@ def invoke(openai_api_key, use_rag, prompt):
         if (os.path.isdir(CHROMA_DIR)):
             vector_db = Chroma(embedding_function = OpenAIEmbeddings(),
                                persist_directory = CHROMA_DIR)
+            print(os.listdir("Load DB"))
         else:
             loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL], YOUTUBE_DIR),
                                    OpenAIWhisperParser())
@@ -55,6 +56,7 @@ def invoke(openai_api_key, use_rag, prompt):
             vector_db = Chroma.from_documents(documents = splits,
                                               embedding = OpenAIEmbeddings(),
                                               persist_directory = CHROMA_DIR)
+            print(os.listdir("Make DB"))
         rag_chain = RetrievalQA.from_chain_type(llm,
                                                 chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
                                                 retriever = vector_db.as_retriever(search_kwargs = {"k": 3}),
@@ -65,7 +67,7 @@ def invoke(openai_api_key, use_rag, prompt):
     else:
         chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
         result = chain.run({"question": prompt})
-
+    print(os.listdir("/data/chroma/"))
     return result

 description = """<strong>Overview:</strong> The app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data
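For context, all three hunks sit inside the retrieval branch of invoke(): reuse the persisted Chroma index when CHROMA_DIR already exists on disk, otherwise transcribe the YouTube audio, split it, embed it, and persist it, then answer through RetrievalQA. Below is a condensed sketch of that load-or-build pattern, assuming the same langchain imports app.py already uses; the helper names are placeholders, module paths vary between langchain versions, and the keyword arguments truncated at the hunk boundary are omitted rather than guessed.

import os
from langchain.chains import RetrievalQA
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma

CHROMA_DIR = "/data/chroma/"

def load_or_build_db(splits):
    # Reuse the index persisted on the Space's /data storage if it exists ...
    if os.path.isdir(CHROMA_DIR):
        return Chroma(embedding_function = OpenAIEmbeddings(),
                      persist_directory = CHROMA_DIR)
    # ... otherwise embed the transcript chunks and persist them for next time.
    return Chroma.from_documents(documents = splits,
                                 embedding = OpenAIEmbeddings(),
                                 persist_directory = CHROMA_DIR)

def make_rag_chain(llm, vector_db, rag_prompt):
    # Same wiring as the diff: a custom prompt plus top-3 retrieval.
    return RetrievalQA.from_chain_type(llm,
                                       chain_type_kwargs = {"prompt": rag_prompt},
                                       retriever = vector_db.as_retriever(search_kwargs = {"k": 3}))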
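A note on the three added calls: os.listdir() takes a directory path, so print(os.listdir("Load DB")) and print(os.listdir("Make DB")) will raise FileNotFoundError at runtime unless directories literally named "Load DB" and "Make DB" exist in the working directory; only the third call, pointed at /data/chroma/, lists the persisted index. If the intent was to label the code path and inspect CHROMA_DIR (an assumption, not confirmed by the commit), a sketch like the following would do that; debug_listing is a hypothetical helper, not part of app.py.

import os

CHROMA_DIR = "/data/chroma/"  # persist directory used elsewhere in app.py

def debug_listing(label, path = CHROMA_DIR):
    # Print a branch label plus the contents of the persisted Chroma directory,
    # guarding against the case where it has not been created yet.
    print(label, os.listdir(path) if os.path.isdir(path) else "(not created yet)")

# e.g. debug_listing("Load DB") after loading, debug_listing("Make DB") after building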