Update app.py
Browse files
app.py
CHANGED
@@ -117,7 +117,7 @@ def rag_chain(llm, prompt, db):
|
|
117 |
def wandb_log(prompt, completion, rag_option):
    """Log one prompt/completion pair to Weights & Biases.

    Starts a fresh W&B run per call (project "openai-llm-rag"), logs the
    prompt, the completion, and which RAG backend was used, then closes
    the run. Uses the module-level ``wandb_api_key`` and ``config``.

    Args:
        prompt: The user prompt that was sent to the LLM.
        completion: The raw completion returned by the chain.
        rag_option: Which retrieval option produced the completion
            (e.g. "Chroma", "MongoDB", or the no-RAG fallback).
    """
    wandb.login(key = wandb_api_key)
    wandb.init(project = "openai-llm-rag", config = config)
    try:
        wandb.log({"prompt": prompt, "completion": completion, "rag_option": rag_option})
    finally:
        # Always close the run, even if wandb.log raises, so no dangling
        # run is left open on the W&B side.
        wandb.finish()
|
122 |
|
123 |
def invoke(openai_api_key, rag_option, prompt):
|
@@ -137,20 +137,17 @@ def invoke(openai_api_key, rag_option, prompt):
|
|
137 |
db = document_retrieval_chroma(llm, prompt)
|
138 |
completion = rag_chain(llm, prompt, db)
|
139 |
result = completion["result"]
|
140 |
-
completion = json.dumps(completion)
|
141 |
elif (rag_option == "MongoDB"):
|
142 |
#splits = document_loading_splitting()
|
143 |
#document_storage_mongodb(splits)
|
144 |
db = document_retrieval_mongodb(llm, prompt)
|
145 |
completion = rag_chain(llm, prompt, db)
|
146 |
result = completion["result"]
|
147 |
-
completion = json.dumps(completion)
|
148 |
else:
|
149 |
result = llm_chain(llm, prompt)
|
150 |
completion = result
|
151 |
except Exception as e:
|
152 |
raise gr.Error(e)
|
153 |
-
print(completion)
|
154 |
wandb_log(prompt, completion, rag_option)
|
155 |
return result
|
156 |
|
|
|
117 |
def wandb_log(prompt, completion, rag_option):
    """Log one prompt/completion pair to Weights & Biases.

    Starts a fresh W&B run per call (project "openai-llm-rag"), logs the
    prompt, the stringified completion, and which RAG backend was used,
    then closes the run. Uses the module-level ``wandb_api_key`` and
    ``config``.

    Args:
        prompt: The user prompt that was sent to the LLM.
        completion: The completion returned by the chain; coerced with
            ``str()`` so non-string chain results (e.g. dicts) log cleanly.
        rag_option: Which retrieval option produced the completion
            (e.g. "Chroma", "MongoDB", or the no-RAG fallback).
    """
    wandb.login(key = wandb_api_key)
    wandb.init(project = "openai-llm-rag", config = config)
    try:
        wandb.log({"prompt": prompt, "completion": str(completion), "rag_option": rag_option})
    finally:
        # Always close the run, even if wandb.log raises, so no dangling
        # run is left open on the W&B side.
        wandb.finish()
|
122 |
|
123 |
def invoke(openai_api_key, rag_option, prompt):
|
|
|
137 |
db = document_retrieval_chroma(llm, prompt)
|
138 |
completion = rag_chain(llm, prompt, db)
|
139 |
result = completion["result"]
|
|
|
140 |
elif (rag_option == "MongoDB"):
|
141 |
#splits = document_loading_splitting()
|
142 |
#document_storage_mongodb(splits)
|
143 |
db = document_retrieval_mongodb(llm, prompt)
|
144 |
completion = rag_chain(llm, prompt, db)
|
145 |
result = completion["result"]
|
|
|
146 |
else:
|
147 |
result = llm_chain(llm, prompt)
|
148 |
completion = result
|
149 |
except Exception as e:
|
150 |
raise gr.Error(e)
|
|
|
151 |
wandb_log(prompt, completion, rag_option)
|
152 |
return result
|
153 |
|