bstraehle committed
Commit 306ef53 · 1 Parent(s): 0408b9d

Update app.py

Files changed (1)
app.py +13 -10
app.py CHANGED
@@ -115,13 +115,13 @@ def rag_chain(llm, prompt, db):
     completion = rag_chain({"query": prompt})
     return completion
 
-def wandb_trace(rag_option, prompt, prompt_template, completion):
+def wandb_trace(rag_option, prompt, prompt_template, completion, chain_name):
     wandb.init(project = "openai-llm-rag")
     trace = Trace(
-        name="test",
-        kind="chain",
-        #status_code=status,
-        #status_message=status_message,
+        name = chain_name,
+        kind = "chain",
+        #status_code = status,
+        #status_message = status_message,
         metadata={
             "chunk_overlap": config["chunk_overlap"],
             "chunk_size": config["chunk_size"],
@@ -129,10 +129,10 @@ def wandb_trace(rag_option, prompt, prompt_template, completion):
             "model": config["model"],
             "temperature": config["temperature"],
         },
-        #start_time_ms=start_time_ms,
-        #end_time_ms=end_time_ms,
-        inputs={"rag_option": rag_option, "prompt": prompt, "prompt_template": prompt_template},
-        outputs={"completion": str(completion)},
+        #start_time_ms = start_time_ms,
+        #end_time_ms = end_time_ms,
+        inputs = {"rag_option": rag_option, "prompt": prompt, "prompt_template": prompt_template},
+        outputs = {"completion": str(completion)},
     )
     trace.log("test")
     wandb.finish()
@@ -155,6 +155,7 @@ def invoke(openai_api_key, rag_option, prompt):
             completion = rag_chain(llm, prompt, db)
             result = completion["result"]
             prompt_template = rag_template
+            chain_name = type(RetrievalQA).__name__
         elif (rag_option == "MongoDB"):
             #splits = document_loading_splitting()
             #document_storage_mongodb(splits)
@@ -162,15 +163,17 @@ def invoke(openai_api_key, rag_option, prompt):
             completion = rag_chain(llm, prompt, db)
             result = completion["result"]
             prompt_template = rag_template
+            chain_name = type(RetrievalQA).__name__
         else:
             result = llm_chain(llm, prompt)
             completion = result
             prompt_template = llm_template
+            chain_name = type(LLMChain).__name__
     except Exception as e:
         completion = e
         raise gr.Error(e)
     finally:
-        wandb_trace(rag_option, prompt, prompt_template, completion)
+        wandb_trace(rag_option, prompt, prompt_template, completion, chain_name)
     return result
 
 description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with
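One detail worth double-checking in the new chain_name lines: type(RetrievalQA).__name__ returns the name of the class's metaclass, not of the class itself, so it evaluates to something like "type" (or the pydantic metaclass name for LangChain chain classes) rather than "RetrievalQA"; plain RetrievalQA.__name__ yields the intended string. A quick illustration with a hypothetical stand-in class:

# Stand-in for langchain.chains.RetrievalQA; the same rule holds for any class.
class RetrievalQA:
    pass

print(RetrievalQA.__name__)        # RetrievalQA -- the class's own name
print(type(RetrievalQA).__name__)  # type        -- the name of its metaclass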
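For reference, the wandb_trace helper this commit extends follows the W&B prompts tracing pattern: open a run, build a Trace span, log it, and finish the run. Below is a minimal, self-contained sketch of that pattern, assuming the Trace class from wandb.sdk.data_types.trace_tree; the project name and field values are illustrative stand-ins, not values from this repo's config.

import wandb
from wandb.sdk.data_types.trace_tree import Trace

# Open a run that the trace will be logged into (project name is illustrative).
wandb.init(project = "openai-llm-rag")

# A single chain-level span; metadata, inputs, and outputs are free-form dicts
# that appear in the W&B Traces UI.
trace = Trace(
    name = "RetrievalQA",   # span name shown in the UI
    kind = "chain",         # span type, e.g. "chain", "llm", "tool", "agent"
    metadata = {"model": "gpt-4", "temperature": 0},
    inputs = {"prompt": "What is RAG?"},
    outputs = {"completion": "Retrieval-augmented generation is ..."},
)

trace.log("test")  # log the span under a trace name
wandb.finish()     # close the run so the trace is flushed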