bstraehle committed on
Commit
411f614
·
1 Parent(s): dadebb4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -115,7 +115,7 @@ def rag_chain(llm, prompt, db):
115
  completion = rag_chain({"query": prompt})
116
  return completion
117
 
118
- def wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_name, status_msg, start_time_ms, end_time_ms):
119
  wandb.init(project = "openai-llm-rag")
120
  trace = Trace(
121
  name = chain_name,
@@ -133,7 +133,7 @@ def wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_n
133
  end_time_ms = end_time_ms,
134
  inputs = {"rag_option": rag_option, "prompt": str(prompt), "prompt_template": str(prompt_template)},
135
  outputs = {"result": str(result), "completion": str(completion)},
136
- model_dict = {"api_type": "openai"}
137
  )
138
  trace.log("test")
139
  wandb.finish()
@@ -173,7 +173,6 @@ def invoke(openai_api_key, rag_option, prompt):
173
  chain_name = "RetrievalQA"
174
  else:
175
  result = llm_chain(llm, prompt)
176
- completion = result
177
  prompt_template = llm_template
178
  chain_name = "LLMChain"
179
  except Exception as e:
@@ -181,7 +180,7 @@ def invoke(openai_api_key, rag_option, prompt):
181
  raise gr.Error(e)
182
  finally:
183
  end_time_ms = round(time.time() * 1000)
184
- wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_name, status_msg, start_time_ms, end_time_ms)
185
  return result
186
 
187
  description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with
 
115
  completion = rag_chain({"query": prompt})
116
  return completion
117
 
118
+ def wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_name, status_msg, start_time_ms, end_time_ms, llm):
119
  wandb.init(project = "openai-llm-rag")
120
  trace = Trace(
121
  name = chain_name,
 
133
  end_time_ms = end_time_ms,
134
  inputs = {"rag_option": rag_option, "prompt": str(prompt), "prompt_template": str(prompt_template)},
135
  outputs = {"result": str(result), "completion": str(completion)},
136
+ model_dict = {"llm": str(llm)}
137
  )
138
  trace.log("test")
139
  wandb.finish()
 
173
  chain_name = "RetrievalQA"
174
  else:
175
  result = llm_chain(llm, prompt)
 
176
  prompt_template = llm_template
177
  chain_name = "LLMChain"
178
  except Exception as e:
 
180
  raise gr.Error(e)
181
  finally:
182
  end_time_ms = round(time.time() * 1000)
183
+ wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_name, status_msg, start_time_ms, end_time_ms, llm)
184
  return result
185
 
186
  description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with