Update app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,6 @@
|
|
1 |
import streamlit as st
|
2 |
import re
|
3 |
import os
|
4 |
-
from langchain.chains import ConversationalRetrievalChain
|
5 |
from langchain_chroma import Chroma
|
6 |
from langchain_community.document_loaders import WebBaseLoader
|
7 |
from langchain_core.output_parsers import StrOutputParser
|
@@ -109,9 +108,20 @@ query = st.text_input("Ask a question based on the blog post", placeholder="Type
|
|
109 |
if 'chat_history' not in st.session_state:
|
110 |
st.session_state['chat_history'] = []
|
111 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
112 |
# Submit button for chat
|
113 |
if st.button("Submit Query"):
|
114 |
-
if query
|
|
|
|
|
|
|
|
|
115 |
# Blog loading logic based on user input URL
|
116 |
loader = WebBaseLoader(
|
117 |
web_paths=(url_input,), # Use the user-input URL
|
@@ -137,15 +147,9 @@ if st.button("Submit Query"):
|
|
137 |
def format_docs(docs):
    """Join the page_content of every document with a blank line between them."""
    parts = [doc.page_content for doc in docs]
    return "\n\n".join(parts)
|
139 |
|
140 |
-
#
|
141 |
-
class CustomLanguageModel:
    """Minimal stand-in for a real LLM backend.

    Used for demonstration: `generate` returns a canned reply and does not
    call any external API.
    """

    def generate(self, prompt, context):
        # Demo stub — the canned reply echoes the prompt; `context` is
        # accepted for interface compatibility but not inspected here.
        reply = f"Response to query '{prompt}' based on context."
        return reply
|
146 |
-
|
147 |
custom_llm = CustomLanguageModel()
|
148 |
-
|
149 |
rag_chain = (
|
150 |
{"context": retriever | format_docs, "question": RunnablePassthrough()}
|
151 |
| prompt
|
|
|
1 |
import streamlit as st
|
2 |
import re
|
3 |
import os
|
|
|
4 |
from langchain_chroma import Chroma
|
5 |
from langchain_community.document_loaders import WebBaseLoader
|
6 |
from langchain_core.output_parsers import StrOutputParser
|
|
|
108 |
if 'chat_history' not in st.session_state:
|
109 |
st.session_state['chat_history'] = []
|
110 |
|
111 |
+
# CustomLanguageModel class with proper context argument
|
112 |
+
class CustomLanguageModel:
    """Placeholder language model used for demo purposes.

    `generate` accepts both a prompt and retrieved context, but returns a
    fixed-format reply rather than calling a real model.
    """

    def generate(self, prompt, context):
        # Accepts `context` so the call signature matches a real LLM wrapper;
        # the demo reply only interpolates the prompt.
        reply = f"Generated response based on '{prompt}' and context provided."
        return reply
|
117 |
+
|
118 |
# Submit button for chat
|
119 |
if st.button("Submit Query"):
|
120 |
+
if not query:
|
121 |
+
st.warning("Please enter a query before submitting!")
|
122 |
+
elif not url_input:
|
123 |
+
st.warning("Please enter a valid URL in the sidebar.")
|
124 |
+
else:
|
125 |
# Blog loading logic based on user input URL
|
126 |
loader = WebBaseLoader(
|
127 |
web_paths=(url_input,), # Use the user-input URL
|
|
|
147 |
def format_docs(docs):
    """Concatenate document bodies, separating each with one blank line."""
    return "\n\n".join(entry.page_content for entry in docs)
|
149 |
|
150 |
+
# Initialize the language model
|
|
|
|
|
|
|
|
|
|
|
|
|
151 |
custom_llm = CustomLanguageModel()
|
152 |
+
|
153 |
rag_chain = (
|
154 |
{"context": retriever | format_docs, "question": RunnablePassthrough()}
|
155 |
| prompt
|