Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
@@ -45,37 +45,40 @@ def invoke(openai_api_key, use_rag, prompt):
|
|
45 |
openai_api_key = openai_api_key,
|
46 |
temperature = 0)
|
47 |
if (use_rag):
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
|
|
|
|
|
|
79 |
else:
|
80 |
chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
|
81 |
result = chain.run({"question": prompt})
|
|
|
45 |
openai_api_key = openai_api_key,
|
46 |
temperature = 0)
|
47 |
if (use_rag):
|
48 |
+
try:
|
49 |
+
# Document loading
|
50 |
+
#docs = []
|
51 |
+
# Load PDF
|
52 |
+
#loader = PyPDFLoader(PDF_URL)
|
53 |
+
#docs.extend(loader.load())
|
54 |
+
# Load Web
|
55 |
+
#loader = WebBaseLoader(WEB_URL_1)
|
56 |
+
#docs.extend(loader.load())
|
57 |
+
# Load YouTube
|
58 |
+
#loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,
|
59 |
+
# YOUTUBE_URL_2,
|
60 |
+
# YOUTUBE_URL_3], YOUTUBE_DIR),
|
61 |
+
# OpenAIWhisperParser())
|
62 |
+
#docs.extend(loader.load())
|
63 |
+
# Document splitting
|
64 |
+
#text_splitter = RecursiveCharacterTextSplitter(chunk_overlap = 150,
|
65 |
+
# chunk_size = 1500)
|
66 |
+
#splits = text_splitter.split_documents(docs)
|
67 |
+
# Document storage
|
68 |
+
#vector_db = Chroma.from_documents(documents = splits,
|
69 |
+
# embedding = OpenAIEmbeddings(disallowed_special = ()),
|
70 |
+
# persist_directory = CHROMA_DIR)
|
71 |
+
# Document retrieval
|
72 |
+
vector_db = Chroma(embedding_function = OpenAIEmbeddings(),
|
73 |
+
persist_directory = CHROMA_DIR)
|
74 |
+
rag_chain = RetrievalQA.from_chain_type(llm,
|
75 |
+
chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
|
76 |
+
retriever = vector_db.as_retriever(search_kwargs = {"k": 3}),
|
77 |
+
return_source_documents = True)
|
78 |
+
result = rag_chain({"query": prompt})
|
79 |
+
result = result["result"]
|
80 |
+
except Exception as e:
|
81 |
+
return e
|
82 |
else:
|
83 |
chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
|
84 |
result = chain.run({"question": prompt})
|