Update app.py
app.py CHANGED
@@ -120,8 +120,8 @@ def wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_n
     trace = Trace(
         name = chain_name,
         kind = "chain",
-        status_code = "SUCCESS" if (status_msg == "") else "ERROR",
-        status_message = status_msg,
+        status_code = "SUCCESS" if (str(status_msg) == "") else "ERROR",
+        status_message = str(status_msg),
         metadata={
             "chunk_overlap": config["chunk_overlap"],
             "chunk_size": config["chunk_size"],
@@ -157,7 +157,7 @@ def invoke(openai_api_key, rag_option, prompt):
             completion = rag_chain(llm, prompt, db)
             result = completion["result"]
             prompt_template = rag_template
-            chain_name = RetrievalQA
+            chain_name = "RetrievalQA"
         elif (rag_option == "MongoDB"):
             #splits = document_loading_splitting()
             #document_storage_mongodb(splits)
@@ -165,12 +165,12 @@ def invoke(openai_api_key, rag_option, prompt):
             completion = rag_chain(llm, prompt, db)
             result = completion["result"]
             prompt_template = rag_template
-            chain_name = RetrievalQA
+            chain_name = "RetrievalQA"
         else:
             result = llm_chain(llm, prompt)
             completion = result
             prompt_template = llm_template
-            chain_name = LLMChain
+            chain_name = "LLMChain"
     except Exception as e:
         status_msg = e
         raise gr.Error(e)
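
Why the str() wrapper matters: in the except branch, status_msg is rebound to the exception object itself, so the Trace would otherwise receive an Exception where a string is expected. A minimal, standalone sketch of that behavior (not the Space's code; it assumes status_msg defaults to "", as the diff implies):

    # status_msg defaults to "" and is rebound to the exception object
    # when the chain invocation fails.
    status_msg = ""
    try:
        raise ValueError("boom")      # stand-in for a failing chain call
    except Exception as e:
        status_msg = e

    # The old comparison (status_msg == "") already evaluated to False here,
    # but status_message was handed an Exception object instead of a string.
    print(str(status_msg) == "")      # False -> status_code becomes "ERROR"
    print(str(status_msg))            # "boom", a plain string safe to log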
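
The quoting fix is similar in spirit: unquoted, RetrievalQA and LLMChain are Python names. If those LangChain classes are imported, chain_name is bound to a class object rather than a label; if they are not imported, the line raises a NameError. A hypothetical stand-in class makes the difference visible:

    class RetrievalQA:                # stand-in for the imported LangChain class
        pass

    chain_name = RetrievalQA          # binds the class object, not a label
    print(chain_name)                 # <class '__main__.RetrievalQA'>

    chain_name = "RetrievalQA"        # the plain string the Trace name expects
    print(chain_name)                 # RetrievalQA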