bstraehle committed on
Commit
0408b9d
·
1 Parent(s): 264f8e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -115,7 +115,7 @@ def rag_chain(llm, prompt, db):
115
  completion = rag_chain({"query": prompt})
116
  return completion
117
 
118
- def wandb_trace(prompt, completion, rag_option):
119
  wandb.init(project = "openai-llm-rag")
120
  trace = Trace(
121
  name="test",
@@ -123,7 +123,6 @@ def wandb_trace(prompt, completion, rag_option):
123
  #status_code=status,
124
  #status_message=status_message,
125
  metadata={
126
- "rag_option": rag_option,
127
  "chunk_overlap": config["chunk_overlap"],
128
  "chunk_size": config["chunk_size"],
129
  "k": config["k"],
@@ -132,7 +131,7 @@ def wandb_trace(prompt, completion, rag_option):
132
  },
133
  #start_time_ms=start_time_ms,
134
  #end_time_ms=end_time_ms,
135
- inputs={"prompt": prompt, "template": "todo"},
136
  outputs={"completion": str(completion)},
137
  )
138
  trace.log("test")
@@ -171,7 +170,7 @@ def invoke(openai_api_key, rag_option, prompt):
171
  completion = e
172
  raise gr.Error(e)
173
  finally:
174
- wandb_trace(prompt_template, prompt, completion, rag_option)
175
  return result
176
 
177
  description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with
 
115
  completion = rag_chain({"query": prompt})
116
  return completion
117
 
118
+ def wandb_trace(rag_option, prompt, prompt_template, completion):
119
  wandb.init(project = "openai-llm-rag")
120
  trace = Trace(
121
  name="test",
 
123
  #status_code=status,
124
  #status_message=status_message,
125
  metadata={
 
126
  "chunk_overlap": config["chunk_overlap"],
127
  "chunk_size": config["chunk_size"],
128
  "k": config["k"],
 
131
  },
132
  #start_time_ms=start_time_ms,
133
  #end_time_ms=end_time_ms,
134
+ inputs={"rag_option": rag_option, "prompt": prompt, "prompt_template": prompt_template},
135
  outputs={"completion": str(completion)},
136
  )
137
  trace.log("test")
 
170
  completion = e
171
  raise gr.Error(e)
172
  finally:
173
+ wandb_trace(rag_option, prompt, prompt_template, completion)
174
  return result
175
 
176
  description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with