Update app.py
Browse files
app.py
CHANGED
@@ -34,9 +34,6 @@ config = {
|
|
34 |
"temperature": 0,
|
35 |
}
|
36 |
|
37 |
-
wandb.login(key = wandb_api_key)
|
38 |
-
wandb.init(project = "openai-llm-rag", config = config)
|
39 |
-
|
40 |
template = """If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Always say
|
41 |
"🧠 Thanks for using the app - Bernd" at the end of the answer. """
|
42 |
|
@@ -114,6 +111,12 @@ def rag_chain(llm, prompt, db):
|
|
114 |
completion = rag_chain({"query": prompt})
|
115 |
return completion["result"]
|
116 |
|
|
|
|
|
|
|
|
|
|
|
|
|
117 |
def invoke(openai_api_key, rag_option, prompt):
|
118 |
if (openai_api_key == ""):
|
119 |
raise gr.Error("OpenAI API Key is required.")
|
@@ -139,7 +142,7 @@ def invoke(openai_api_key, rag_option, prompt):
|
|
139 |
completion = llm_chain(llm, prompt)
|
140 |
except Exception as e:
|
141 |
raise gr.Error(e)
|
142 |
-
|
143 |
return completion
|
144 |
|
145 |
description = """<strong>Overview:</strong> Context-aware multimodal reasoning application that demonstrates a <strong>large language model (LLM)</strong> with
|
|
|
34 |
"temperature": 0,
|
35 |
}
|
36 |
|
|
|
|
|
|
|
37 |
template = """If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Always say
|
38 |
"🧠 Thanks for using the app - Bernd" at the end of the answer. """
|
39 |
|
|
|
111 |
completion = rag_chain({"query": prompt})
|
112 |
return completion["result"]
|
113 |
|
114 |
+
def wandb_log(prompt, completion, rag_option):
    """Record one prompt/completion exchange to Weights & Biases.

    Each call opens and closes its own W&B run (login → init → log →
    finish), so every logged exchange shows up as a separate run in the
    "openai-llm-rag" project.

    Args:
        prompt: The user prompt that was submitted.
        completion: The model output produced for that prompt.
        rag_option: Which retrieval (RAG) option was active for the call.
    """
    # NOTE(review): relies on module-level `wandb_api_key` and `config`;
    # spinning up a fresh run per logged call is the current behavior —
    # confirm this is intended rather than one long-lived run.
    wandb.login(key=wandb_api_key)
    wandb.init(project="openai-llm-rag", config=config)
    payload = {
        "prompt": prompt,
        "completion": completion,
        "rag_option": rag_option,
    }
    wandb.log(payload)
    wandb.finish()
|
119 |
+
|
120 |
def invoke(openai_api_key, rag_option, prompt):
|
121 |
if (openai_api_key == ""):
|
122 |
raise gr.Error("OpenAI API Key is required.")
|
|
|
142 |
completion = llm_chain(llm, prompt)
|
143 |
except Exception as e:
|
144 |
raise gr.Error(e)
|
145 |
+
wandb_log(prompt, completion, rag_option)
|
146 |
return completion
|
147 |
|
148 |
description = """<strong>Overview:</strong> Context-aware multimodal reasoning application that demonstrates a <strong>large language model (LLM)</strong> with
|