bstraehle committed on
Commit
264f8e6
·
1 Parent(s): d9ac74b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -2
app.py CHANGED
@@ -145,7 +145,6 @@ def invoke(openai_api_key, rag_option, prompt):
145
  raise gr.Error("Retrieval Augmented Generation is required.")
146
  if (prompt == ""):
147
  raise gr.Error("Prompt is required.")
148
- completion = ""
149
  try:
150
  llm = ChatOpenAI(model_name = config["model"],
151
  openai_api_key = openai_api_key,
@@ -156,19 +155,23 @@ def invoke(openai_api_key, rag_option, prompt):
156
  db = document_retrieval_chroma(llm, prompt)
157
  completion = rag_chain(llm, prompt, db)
158
  result = completion["result"]
 
159
  elif (rag_option == "MongoDB"):
160
  #splits = document_loading_splitting()
161
  #document_storage_mongodb(splits)
162
  db = document_retrieval_mongodb(llm, prompt)
163
  completion = rag_chain(llm, prompt, db)
164
  result = completion["result"]
 
165
  else:
166
  result = llm_chain(llm, prompt)
 
 
167
  except Exception as e:
168
  completion = e
169
  raise gr.Error(e)
170
  finally:
171
- wandb_trace(prompt, completion, rag_option)
172
  return result
173
 
174
  description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with
 
145
  raise gr.Error("Retrieval Augmented Generation is required.")
146
  if (prompt == ""):
147
  raise gr.Error("Prompt is required.")
 
148
  try:
149
  llm = ChatOpenAI(model_name = config["model"],
150
  openai_api_key = openai_api_key,
 
155
  db = document_retrieval_chroma(llm, prompt)
156
  completion = rag_chain(llm, prompt, db)
157
  result = completion["result"]
158
+ prompt_template = rag_template
159
  elif (rag_option == "MongoDB"):
160
  #splits = document_loading_splitting()
161
  #document_storage_mongodb(splits)
162
  db = document_retrieval_mongodb(llm, prompt)
163
  completion = rag_chain(llm, prompt, db)
164
  result = completion["result"]
165
+ prompt_template = rag_template
166
  else:
167
  result = llm_chain(llm, prompt)
168
+ completion = result
169
+ prompt_template = llm_template
170
  except Exception as e:
171
  completion = e
172
  raise gr.Error(e)
173
  finally:
174
+ wandb_trace(prompt_template, prompt, completion, rag_option)
175
  return result
176
 
177
  description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with