AreesaAshfaq committed (verified)
Commit f9a1a72 · 1 Parent(s): f8730ec

Update app.py

Files changed (1)
  1. app.py +28 -27
app.py CHANGED
@@ -8,7 +8,6 @@ from langchain_core.runnables import RunnablePassthrough
 from langchain_text_splitters import RecursiveCharacterTextSplitter
 import bs4
 import torch
-import getpass

 # Prompt the user to enter their Langchain API key
 api_key_langchain = st.text_input("Enter your LANGCHAIN_API_KEY", type="password")
@@ -47,13 +46,11 @@ else:
     embedding_model = SentenceTransformerEmbedding('all-MiniLM-L6-v2')

     # Load, chunk, and index the contents of the blog
-    def load_data():
+    def load_data(url):
         loader = WebBaseLoader(
-            web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",),
+            web_paths=(url,),
             bs_kwargs=dict(
-                parse_only=bs4.SoupStrainer(
-                    class_=("post-content", "post-title", "post-header")
-                )
+                parse_only=bs4.SoupStrainer()
             ),
         )
         docs = loader.load()
@@ -62,30 +59,34 @@ else:
         vectorstore = Chroma.from_documents(documents=splits, embedding=embedding_model)
         return vectorstore

-    vectorstore = load_data()
-
     # Streamlit UI
-    st.title("Blog Retrieval and Question Answering")
+    st.title("URL Retrieval and Question Answering")

-    question = st.text_input("Enter your question:")
+    # Input URL from user
+    url = st.text_input("Enter the URL:")

-    if question:
-        retriever = vectorstore.as_retriever()
-        prompt = hub.pull("rlm/rag-prompt", api_key=api_key_langchain)
+    if url:
+        vectorstore = load_data(url)

-        def format_docs(docs):
-            return "\n\n".join(doc.page_content for doc in docs)
+        question = st.text_input("Enter your question:")

-        rag_chain = (
-            {"context": retriever | format_docs, "question": RunnablePassthrough()}
-            | prompt
-            | llm # Replace with your LLM or appropriate function if needed
-            | StrOutputParser()
-        )
+        if question:
+            retriever = vectorstore.as_retriever()
+            prompt = hub.pull("rlm/rag-prompt", api_key=api_key_langchain)
+
+            def format_docs(docs):
+                return "\n\n".join(doc.page_content for doc in docs)
+
+            rag_chain = (
+                {"context": retriever | format_docs, "question": RunnablePassthrough()}
+                | prompt
+                | llm # Replace with your LLM or appropriate function if needed
+                | StrOutputParser()
+            )

-        # Example invocation
-        try:
-            result = rag_chain.invoke(question)
-            st.write("Answer:", result)
-        except Exception as e:
-            st.error(f"An error occurred: {e}")
+            # Example invocation
+            try:
+                result = rag_chain.invoke(question)
+                st.write("Answer:", result)
+            except Exception as e:
+                st.error(f"An error occurred: {e}")
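
For context on what changed: load_data now takes the URL from user input instead of the hardcoded Lilian Weng post, and the SoupStrainer no longer restricts parsing to the post-content/post-title/post-header classes, so the whole fetched page is chunked and indexed. Below is a minimal sketch of that loading path run outside Streamlit; HuggingFaceEmbeddings stands in for the SentenceTransformerEmbedding wrapper defined earlier in app.py, and the splitter settings are assumed rather than taken from this diff.

# Sketch only (not part of this commit): exercising the new load_data(url) path standalone.
import bs4
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Stand-in for the SentenceTransformerEmbedding wrapper defined earlier in app.py.
embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

def load_data(url):
    # Mirrors the updated function: no class_ filter, so the whole page is parsed.
    loader = WebBaseLoader(
        web_paths=(url,),
        bs_kwargs=dict(parse_only=bs4.SoupStrainer()),
    )
    docs = loader.load()
    # Splitter settings are assumed here; the real values live in unchanged lines of app.py.
    splits = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(docs)
    return Chroma.from_documents(documents=splits, embedding=embedding_model)

# Any URL works now; the previously hardcoded blog post is just a convenient test page.
vectorstore = load_data("https://lilianweng.github.io/posts/2023-06-23-agent/")
print(vectorstore.similarity_search("What is task decomposition?", k=1)[0].page_content[:200])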