Update app.py
Browse files
app.py
CHANGED
@@ -120,7 +120,7 @@ def wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_n
|
|
120 |
trace = Trace(
|
121 |
name = chain_name,
|
122 |
kind = "chain",
|
123 |
-
status_code = "SUCCESS",
|
124 |
status_message = status_msg,
|
125 |
metadata={
|
126 |
"chunk_overlap": config["chunk_overlap"],
|
@@ -129,8 +129,8 @@ def wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_n
|
|
129 |
"model": config["model"],
|
130 |
"temperature": config["temperature"],
|
131 |
},
|
132 |
-
start_time_ms = 123,
|
133 |
-
end_time_ms = 456,
|
134 |
inputs = {"rag_option": rag_option, "prompt": prompt, "prompt_template": prompt_template},
|
135 |
outputs = {"result": str(result), "completion": str(completion)},
|
136 |
)
|
@@ -157,7 +157,7 @@ def invoke(openai_api_key, rag_option, prompt):
|
|
157 |
completion = rag_chain(llm, prompt, db)
|
158 |
result = completion["result"]
|
159 |
prompt_template = rag_template
|
160 |
-
chain_name = RetrievalQA.__class__
|
161 |
elif (rag_option == "MongoDB"):
|
162 |
#splits = document_loading_splitting()
|
163 |
#document_storage_mongodb(splits)
|
@@ -165,12 +165,12 @@ def invoke(openai_api_key, rag_option, prompt):
|
|
165 |
completion = rag_chain(llm, prompt, db)
|
166 |
result = completion["result"]
|
167 |
prompt_template = rag_template
|
168 |
-
chain_name = RetrievalQA.__class__
|
169 |
else:
|
170 |
result = llm_chain(llm, prompt)
|
171 |
completion = result
|
172 |
prompt_template = llm_template
|
173 |
-
chain_name = LLMChain.__class__
|
174 |
except Exception as e:
|
175 |
status_msg = e
|
176 |
raise gr.Error(e)
|
@@ -200,7 +200,7 @@ description = """<strong>Overview:</strong> Context-aware multimodal reasoning a
|
|
200 |
<strong>Speech-to-text</strong> via <a href='https://openai.com/research/whisper'>whisper-1</a> model, <strong>text embedding</strong> via
|
201 |
<a href='https://openai.com/blog/new-and-improved-embedding-model'>text-embedding-ada-002</a> model, and <strong>text generation</strong> via
|
202 |
<a href='""" + WEB_URL + """'>gpt-4</a> model. Implementation via AI-first <a href='https://www.langchain.com/'>LangChain</a> toolkit.
|
203 |
-
RAG evaluation via <a href='https://wandb.ai/bstraehle/openai-llm-rag/'>Weights & Biases</a>."""
|
204 |
|
205 |
gr.close_all()
|
206 |
demo = gr.Interface(fn=invoke,
|
|
|
120 |
trace = Trace(
|
121 |
name = chain_name,
|
122 |
kind = "chain",
|
123 |
+
status_code = "SUCCESS" if (status_msg == "") else "ERROR",
|
124 |
status_message = status_msg,
|
125 |
metadata={
|
126 |
"chunk_overlap": config["chunk_overlap"],
|
|
|
129 |
"model": config["model"],
|
130 |
"temperature": config["temperature"],
|
131 |
},
|
132 |
+
#start_time_ms = 123,
|
133 |
+
#end_time_ms = 456,
|
134 |
inputs = {"rag_option": rag_option, "prompt": prompt, "prompt_template": prompt_template},
|
135 |
outputs = {"result": str(result), "completion": str(completion)},
|
136 |
)
|
|
|
157 |
completion = rag_chain(llm, prompt, db)
|
158 |
result = completion["result"]
|
159 |
prompt_template = rag_template
|
160 |
+
chain_name = RetrievalQA.__class__
|
161 |
elif (rag_option == "MongoDB"):
|
162 |
#splits = document_loading_splitting()
|
163 |
#document_storage_mongodb(splits)
|
|
|
165 |
completion = rag_chain(llm, prompt, db)
|
166 |
result = completion["result"]
|
167 |
prompt_template = rag_template
|
168 |
+
chain_name = RetrievalQA.__class__
|
169 |
else:
|
170 |
result = llm_chain(llm, prompt)
|
171 |
completion = result
|
172 |
prompt_template = llm_template
|
173 |
+
chain_name = LLMChain.__class__
|
174 |
except Exception as e:
|
175 |
status_msg = e
|
176 |
raise gr.Error(e)
|
|
|
200 |
<strong>Speech-to-text</strong> via <a href='https://openai.com/research/whisper'>whisper-1</a> model, <strong>text embedding</strong> via
|
201 |
<a href='https://openai.com/blog/new-and-improved-embedding-model'>text-embedding-ada-002</a> model, and <strong>text generation</strong> via
|
202 |
<a href='""" + WEB_URL + """'>gpt-4</a> model. Implementation via AI-first <a href='https://www.langchain.com/'>LangChain</a> toolkit.
|
203 |
+
RAG evaluation via <a href='https://wandb.ai/bstraehle/openai-llm-rag/workspace'>Weights & Biases</a>."""
|
204 |
|
205 |
gr.close_all()
|
206 |
demo = gr.Interface(fn=invoke,
|