bstraehle committed on
Commit
0c788c0
·
1 Parent(s): 2ba16a1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -26
app.py CHANGED
@@ -116,31 +116,14 @@ def rag_chain(llm, prompt, db):
116
  return completion, rag_chain
117
 
118
  def wandb_trace(rag_option, prompt, completion, chain, status_msg, start_time_ms, end_time_ms):
119
- if (chain != None):
120
- if (type(chain).__name__ == "LLMChain"):
121
- print("1=" + str(chain.llm.client))
122
- print("1=" + str(chain.llm.async_client))
123
- print("1=" + str(chain.llm.model_name))
124
- print("1=" + str(chain.llm.temperature))
125
- print("2=" + str(chain.prompt.input_variables))
126
- print("2=" + str(chain.prompt.template))
127
- #if (type(chain).__name__ == "RetrievalQA"):
128
- #print(chain.combine_documents_chain.llm_chain.llm)
129
- #print(chain.combine_documents_chain.llm_chain.async_client)
130
- #print(chain.combine_documents_chain.retriever)
131
- #print(chain.combine_documents_chain.vectorstore)
132
- #print("3=" + str(chain.llm))
133
- #print("4=" + str(chain.chain_type_kwargs))
134
- #print("5=" + str(chain.retriever))
135
  wandb.init(project = "openai-llm-rag")
136
  if (rag_option == "Off" or str(status_msg) != ""):
137
  result = completion
138
  else:
139
  result = completion["result"]
140
- documents = completion["source_documents"]
141
- document_0 = completion["source_documents"][0].metadata["source"]
142
- document_1 = completion["source_documents"][1].metadata["source"]
143
- document_2 = completion["source_documents"][2].metadata["source"]
144
  trace = Trace(
145
  kind = "chain",
146
  name = type(chain).__name__ if (chain != None) else "",
@@ -156,14 +139,12 @@ def wandb_trace(rag_option, prompt, completion, chain, status_msg, start_time_ms
156
  inputs = {"rag_option": rag_option if (str(status_msg) == "") else "",
157
  "prompt": str(prompt if (str(status_msg) == "") else ""),
158
  "prompt_template": str((llm_template if (rag_option == "Off") else rag_template) if (str(status_msg) == "") else ""),
159
- "documents": "" if (rag_option == "Off" or str(status_msg) != "") else str(documents),
160
- "document_0": "" if (rag_option == "Off" or str(status_msg) != "") else str(document_0),
161
- "document_1": "" if (rag_option == "Off" or str(status_msg) != "") else str(document_1),
162
- "document_2": "" if (rag_option == "Off" or str(status_msg) != "") else str(document_2)},
163
  outputs = {"result": result},
164
  start_time_ms = start_time_ms,
165
- end_time_ms = end_time_ms#,
166
- #model_dict = {"": x, "": x}
167
  )
168
  trace.log("test")
169
  wandb.finish()
 
116
  return completion, rag_chain
117
 
118
  def wandb_trace(rag_option, prompt, completion, chain, status_msg, start_time_ms, end_time_ms):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
  wandb.init(project = "openai-llm-rag")
120
  if (rag_option == "Off" or str(status_msg) != ""):
121
  result = completion
122
  else:
123
  result = completion["result"]
124
+ doc_meta_source_0 = completion["source_documents"][0].metadata["source"]
125
+ doc_meta_source_1 = completion["source_documents"][1].metadata["source"]
126
+ doc_meta_source_2 = completion["source_documents"][2].metadata["source"]
 
127
  trace = Trace(
128
  kind = "chain",
129
  name = type(chain).__name__ if (chain != None) else "",
 
139
  inputs = {"rag_option": rag_option if (str(status_msg) == "") else "",
140
  "prompt": str(prompt if (str(status_msg) == "") else ""),
141
  "prompt_template": str((llm_template if (rag_option == "Off") else rag_template) if (str(status_msg) == "") else ""),
142
+ "doc_meta_source_0": "" if (rag_option == "Off" or str(status_msg) != "") else str(doc_meta_source_0),
143
+ "doc_meta_source_1": "" if (rag_option == "Off" or str(status_msg) != "") else str(doc_meta_source_1),
144
+ "doc_meta_source_2": "" if (rag_option == "Off" or str(status_msg) != "") else str(doc_meta_source_2)},
 
145
  outputs = {"result": result},
146
  start_time_ms = start_time_ms,
147
+ end_time_ms = end_time_ms
 
148
  )
149
  trace.log("test")
150
  wandb.finish()