AreesaAshfaq committed · Commit 5e084ab (verified) · 1 parent: b5124d9

Update app.py

Files changed (1):
  1. app.py +30 -47
app.py CHANGED
@@ -63,57 +63,40 @@ else:
             ),
         )
         docs = loader.load()
-        if not docs:
-            st.error("No documents were loaded. Please check the URL and try again.")
-            return None
-
-        st.write(f"Loaded {len(docs)} documents.")
-
         text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
         splits = text_splitter.split_documents(docs)
-        if not splits:
-            st.error("No document splits were created. Please check the document content.")
-            return None
-
-        st.write(f"Created {len(splits)} document splits.")
-
         vectorstore = Chroma.from_documents(documents=splits, embedding=embedding_model)
-        if vectorstore is None:
-            st.error("Failed to create the vectorstore.")
-            return None
-
         return vectorstore
     except Exception as e:
         st.error(f"An error occurred while loading the blog: {e}")
         return None
-
-
-# Load the data if a URL is provided
-if blog_url:
-    vectorstore = load_data(blog_url)
-    if vectorstore:
-        # Streamlit UI for question input
-        question = st.text_input("Enter your question:")
-
-        if question:
-            retriever = vectorstore.as_retriever()
-            prompt = hub.pull("rlm/rag-prompt", api_key=api_key_langchain)
-
-            def format_docs(docs):
-                return "\n\n".join(doc.page_content for doc in docs)
-
-            rag_chain = (
-                {"context": retriever | format_docs, "question": RunnablePassthrough()}
-                | prompt
-                | llm
-                | StrOutputParser()
-            )
-
-            # Example invocation
-            try:
-                result = rag_chain.invoke(question)
-                st.write("Answer:", result)
-            except Exception as e:
-                st.error(f"An error occurred while generating the answer: {e}")
-    else:
-        st.write("Failed to load the blog content. Please check the URL and try again.")
+
+# Load the data if a URL is provided
+if blog_url:
+    vectorstore = load_data(blog_url)
+    if vectorstore:
+        # Streamlit UI for question input
+        question = st.text_input("Enter your question:")
+
+        if question:
+            retriever = vectorstore.as_retriever()
+            prompt = hub.pull("rlm/rag-prompt", api_key=api_key_langchain)
+
+            def format_docs(docs):
+                return "\n\n".join(doc.page_content for doc in docs)
+
+            rag_chain = (
+                {"context": retriever | format_docs, "question": RunnablePassthrough()}
+                | prompt
+                | llm
+                | StrOutputParser()
+            )
+
+            # Example invocation
+            try:
+                result = rag_chain.invoke(question)
+                st.write("Answer:", result)
+            except Exception as e:
+                st.error(f"An error occurred while generating the answer: {e}")
+    else:
+        st.write("Failed to load the blog content. Please check the URL and try again.")