Build error
Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-import
+import openai, os, time, wandb
 
 from langchain.chains import LLMChain, RetrievalQA
 from langchain.chat_models import ChatOpenAI
@@ -32,8 +32,6 @@ MONGODB_INDEX_NAME = "default"
 
 description = os.environ["DESCRIPTION"]
 
-#langchain.verbose = True
-
 config = {
     "chunk_overlap": 150,
     "chunk_size": 1500,
@@ -104,7 +102,7 @@ def document_retrieval_mongodb(llm, prompt):
     return db
 
 def llm_chain(llm, prompt):
-    llm_chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT, verbose =
+    llm_chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT, verbose = False)
     completion = llm_chain.run({"question": prompt})
     return completion, llm_chain
 
@@ -113,14 +111,14 @@ def rag_chain(llm, prompt, db):
         chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
         retriever = db.as_retriever(search_kwargs = {"k": config["k"]}),
         return_source_documents = True,
-        verbose =
+        verbose = False)
     completion = rag_chain({"query": prompt})
     return completion, rag_chain
 
 def wandb_trace(rag_option, prompt, completion, chain, status_msg, start_time_ms, end_time_ms):
-
-
-
+    print(chain.inputKey)
+    print(chain.outputKey)
+    print(chain.retriever)
     wandb.init(project = "openai-llm-rag")
     if (rag_option == "Off" or str(status_msg) != ""):
         result = completion
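
Note that the hunk at old line 113 starts mid-call, so only the tail of the retrieval chain construction is visible. A minimal sketch of how the repaired rag_chain function plausibly reads in full, assuming the legacy RetrievalQA.from_chain_type constructor and a "stuff" chain type (neither is visible in this diff); RAG_CHAIN_PROMPT and config are module-level names already present in app.py:

def rag_chain(llm, prompt, db):
    # Sketch only: the head of this call is reconstructed, not taken from the diff.
    rag_chain = RetrievalQA.from_chain_type(
        llm = llm,
        chain_type = "stuff",  # assumption; not shown in the diff
        chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
        retriever = db.as_retriever(search_kwargs = {"k": config["k"]}),
        return_source_documents = True,
        verbose = False)  # the argument this commit completes
    completion = rag_chain({"query": prompt})
    return completion, rag_chain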
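
One caveat on the three debug prints added to wandb_trace: LangChain chain objects expose input_keys and output_keys (snake_case, plural lists) rather than inputKey and outputKey, and only the retrieval chain carries a retriever attribute, so these lines would be expected to raise AttributeError at runtime. A hedged sketch of the equivalent introspection (the hasattr guard and example values are illustrative, not from the commit):

    print(chain.input_keys)           # e.g. ["question"] for LLMChain, ["query"] for RetrievalQA
    print(chain.output_keys)          # e.g. ["text"], or ["result", "source_documents"]
    if hasattr(chain, "retriever"):   # only the RAG path has a retriever
        print(chain.retriever)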