bstraehle committed on
Commit
df49bad
·
1 Parent(s): 306ef53

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -115,7 +115,7 @@ def rag_chain(llm, prompt, db):
115
  completion = rag_chain({"query": prompt})
116
  return completion
117
 
118
- def wandb_trace(rag_option, prompt, prompt_template, completion, chain_name):
119
  wandb.init(project = "openai-llm-rag")
120
  trace = Trace(
121
  name = chain_name,
@@ -132,7 +132,7 @@ def wandb_trace(rag_option, prompt, prompt_template, completion, chain_name):
132
  #start_time_ms = start_time_ms,
133
  #end_time_ms = end_time_ms,
134
  inputs = {"rag_option": rag_option, "prompt": prompt, "prompt_template": prompt_template},
135
- outputs = {"completion": str(completion)},
136
  )
137
  trace.log("test")
138
  wandb.finish()
@@ -155,7 +155,7 @@ def invoke(openai_api_key, rag_option, prompt):
155
  completion = rag_chain(llm, prompt, db)
156
  result = completion["result"]
157
  prompt_template = rag_template
158
- chain_name = type(RetrievalQA).__name__
159
  elif (rag_option == "MongoDB"):
160
  #splits = document_loading_splitting()
161
  #document_storage_mongodb(splits)
@@ -163,17 +163,17 @@ def invoke(openai_api_key, rag_option, prompt):
163
  completion = rag_chain(llm, prompt, db)
164
  result = completion["result"]
165
  prompt_template = rag_template
166
- chain_name = type(RetrievalQA).__name__
167
  else:
168
  result = llm_chain(llm, prompt)
169
  completion = result
170
  prompt_template = llm_template
171
- chain_name = type(LLMChain).__name__
172
  except Exception as e:
173
  completion = e
174
  raise gr.Error(e)
175
  finally:
176
- wandb_trace(rag_option, prompt, prompt_template, completion, chain_name)
177
  return result
178
 
179
  description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with
 
115
  completion = rag_chain({"query": prompt})
116
  return completion
117
 
118
+ def wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_name):
119
  wandb.init(project = "openai-llm-rag")
120
  trace = Trace(
121
  name = chain_name,
 
132
  #start_time_ms = start_time_ms,
133
  #end_time_ms = end_time_ms,
134
  inputs = {"rag_option": rag_option, "prompt": prompt, "prompt_template": prompt_template},
135
+ outputs = {"result": str(result), "completion": str(completion)},
136
  )
137
  trace.log("test")
138
  wandb.finish()
 
155
  completion = rag_chain(llm, prompt, db)
156
  result = completion["result"]
157
  prompt_template = rag_template
158
+ chain_name = type(RetrievalQA)
159
  elif (rag_option == "MongoDB"):
160
  #splits = document_loading_splitting()
161
  #document_storage_mongodb(splits)
 
163
  completion = rag_chain(llm, prompt, db)
164
  result = completion["result"]
165
  prompt_template = rag_template
166
+ chain_name = type(RetrievalQA)
167
  else:
168
  result = llm_chain(llm, prompt)
169
  completion = result
170
  prompt_template = llm_template
171
+ chain_name = type(LLMChain)
172
  except Exception as e:
173
  completion = e
174
  raise gr.Error(e)
175
  finally:
176
+ wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_name)
177
  return result
178
 
179
  description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with