Update app.py
app.py CHANGED
@@ -7,7 +7,6 @@ from langchain_core.output_parsers import StrOutputParser
 from langchain_core.runnables import RunnablePassthrough
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 from sentence_transformers import SentenceTransformer
-from langchain import hub
 import bs4
 import torch
 
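(The dropped hub import was only used for the hosted prompt lookup removed later in this change, prompt = hub.pull("rlm/rag-prompt"); its role is taken over by the local RAGPrompt callable introduced below.)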
@@ -114,6 +113,11 @@ class CustomLanguageModel:
         # Implement logic to generate a response based on prompt and context
         return f"Generated response based on prompt: '{prompt}' and context: '{context}'."
 
+# Define a callable class for RAGPrompt
+class RAGPrompt:
+    def __call__(self, data):
+        return {"question": data["question"], "context": data["context"]}
+
 # Submit button for chat
 if st.button("Submit Query"):
     if not query:
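For reference, a minimal sketch of what the new RAGPrompt callable does in isolation (the sample values here are made up): unlike the removed hub prompt, it performs no template formatting and simply passes the two keys through unchanged.

    prompt = RAGPrompt()
    prompt({"question": "What is RAG?", "context": "Retrieval-augmented generation pairs a retriever with an LLM."})
    # -> {"question": "What is RAG?", "context": "Retrieval-augmented generation pairs a retriever with an LLM."}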
@@ -141,23 +145,27 @@ if st.button("Submit Query"):
 
         # Retrieve and generate using the relevant snippets of the blog
         retriever = vectorstore.as_retriever()
-        prompt = hub.pull("rlm/rag-prompt")
 
+        # Retrieve relevant documents
+        retrieved_docs = retriever.get_relevant_documents(query)
+
+        # Format the retrieved documents
         def format_docs(docs):
             return "\n\n".join(doc.page_content for doc in docs)
 
+        context = format_docs(retrieved_docs)
+
         # Initialize the language model
         custom_llm = CustomLanguageModel()
 
-        #
-
-        context = format_docs(retrieved_docs)
+        # Initialize RAG chain using the prompt
+        prompt = RAGPrompt()
 
         rag_chain = (
-            {"context": context, "question": query}
-            | prompt
-            | (lambda data: custom_llm.generate(data["question"], data["context"]))
-            | StrOutputParser()
+            {"context": context, "question": query}  # Start the chain with context and question
+            | prompt  # Use the custom prompt
+            | (lambda data: custom_llm.generate(data["question"], data["context"]))  # Pass question and context to LLM
+            | StrOutputParser()  # Parse the output
         )
 
         # Generate the answer using the user's query
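One small API note: newer langchain-core releases deprecate get_relevant_documents on retrievers in favor of the Runnable interface, so on current versions the retrieval line would read:

    retrieved_docs = retriever.invoke(query)  # same List[Document] result, non-deprecated API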
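A caveat with the chain as committed: the first pipe evaluates between a plain dict and a plain callable, which standard Python rejects with a TypeError, since LCEL's | overloading only applies once a Runnable is involved. A sketch of an equivalent chain that stays inside LCEL, assuming the definitions from app.py above (context, query, custom_llm, RAGPrompt), wraps each plain callable in RunnableLambda:

    from langchain_core.output_parsers import StrOutputParser
    from langchain_core.runnables import RunnableLambda

    rag_chain = (
        RunnableLambda(lambda _: {"context": context, "question": query})  # seed the chain; input is ignored
        | RunnableLambda(RAGPrompt())  # pass question/context through
        | RunnableLambda(lambda data: custom_llm.generate(data["question"], data["context"]))
        | StrOutputParser()  # passes the generated string through
    )

    answer = rag_chain.invoke({})  # the seeding lambda discards this input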