JohnsonMLEngineer commited on
Commit
b839ee6
·
verified ·
1 Parent(s): 74ae1ff

Upload 8 files

Browse files

The Bharatiya Nyaya Sanhita (BNS), which replaced the Indian Penal Code on July 1, 2024, offers a modern legal framework. This chatbot provides reliable information on the BNS for reference, catering to legal professionals, students, and the public. Note: it is for informational use only and is not a substitute for legal advice.

Files changed (8) hide show
  1. app.py +44 -0
  2. bns3.pdf +0 -0
  3. chatbot.py +5 -0
  4. config.py +7 -0
  5. document_processor.py +8 -0
  6. embeddings_store.py +35 -0
  7. rag_initializer.py +13 -0
  8. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

from chatbot import chatbot_response
from config import setup_environment
from rag_initializer import initialize_rag_chain

# Load .env / API credentials before any chain construction happens.
setup_environment()

# st.set_page_config must be the first Streamlit rendering call in the app.
st.set_page_config(
    page_title="Bharatiya Nyaya Sanhita Chatbot",
    page_icon="⚖️",
    layout="wide",
)
9
+
10
def main():
    """Render the Bharatiya Nyaya Sanhita chat UI and answer user questions.

    Builds (or fetches the cached) RAG chain, replays the chat history kept
    in ``st.session_state``, and streams each new question through the chain.
    """
    st.title("Bharatiya Nyaya Sanhita Chatbot")
    st.sidebar.info("Bharatiya Nyaya Sanhita came into effect on 1 July, 2024 after being passed by the parliament in December 2023 to replace the Indian Penal Code (IPC), which dated back to the period of British India")
    st.sidebar.warning("Disclaimer: This chatbot provides information based on the Bharatiya Nyaya Sanhita for reference purposes only. It is not a substitute for professional legal advice.")

    rag_chain = initialize_rag_chain()
    # Bail out early if the chain could not be built; the initializer has
    # already shown its own error message for the underlying cause.
    if rag_chain is None:
        st.error("Failed to initialize the chatbot. Please try again later.")
        return

    st.write("Welcome! I'm here to help you with information about the Bharatiya Nyaya Sanhita. What would you like to know?")

    # Chat history survives Streamlit reruns via session_state.
    if "messages" not in st.session_state:
        st.session_state["messages"] = []

    # Replay the conversation so far.
    for past in st.session_state.messages:
        with st.chat_message(past["role"]):
            st.markdown(past["content"])

    user_text = st.chat_input("Your question:")
    if user_text:
        st.session_state.messages.append({"role": "user", "content": user_text})
        with st.chat_message("user"):
            st.markdown(user_text)

        with st.chat_message("assistant"):
            with st.spinner("AI is thinking..."):
                reply = chatbot_response(user_text, rag_chain)
                st.markdown(reply)

        st.session_state.messages.append({"role": "assistant", "content": reply})


if __name__ == "__main__":
    main()
bns3.pdf ADDED
Binary file (231 kB). View file
 
chatbot.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
def chatbot_response(query, rag_chain):
    """Run *query* through the retrieval chain and return the answer text.

    Any failure (chain invocation or a missing "answer" key) is converted
    into a human-readable error string so the UI never sees an exception.
    """
    try:
        answer = rag_chain.invoke({"input": query})["answer"]
    except Exception as exc:
        return f"Error processing query: {str(exc)}"
    return answer
config.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
import os

from dotenv import load_dotenv

# SECURITY: a literal Google API key was previously committed here. A key
# published in a repository must be treated as compromised — revoke it and
# supply a fresh one via the environment or a local .env file instead.
# Kept as a module attribute for backward compatibility; it is refreshed
# by setup_environment() after .env values are loaded.
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")


def setup_environment():
    """Load environment variables and return the Google API key.

    Reads GOOGLE_API_KEY from the process environment, including values
    provided through a local .env file (via python-dotenv).

    Returns:
        str | None: the configured API key, or None if it is not set.
    """
    global GOOGLE_API_KEY
    load_dotenv()
    GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
    return GOOGLE_API_KEY
document_processor.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ from langchain_community.document_loaders import PyPDFLoader
2
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
3
+
4
def read_documents(directory):
    """Load the PDF at *directory* and return one Document per page."""
    loader = PyPDFLoader(directory)
    return loader.load()
6
+
7
def chunk_data(docs, chunk_size=800, chunk_overlap=40):
    """Split *docs* into overlapping character chunks for embedding."""
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return splitter.split_documents(docs)
embeddings_store.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
2
+ from langchain_chroma import Chroma
3
+ from langchain.chains import create_retrieval_chain
4
+ from langchain.chains.combine_documents import create_stuff_documents_chain
5
+ from langchain_core.prompts import ChatPromptTemplate
6
+
7
def create_embeddings_and_store(doc_chunks):
    """Embed *doc_chunks* into an in-memory Chroma store and build a RAG chain.

    Returns a retrieval chain that answers questions from the indexed
    chunks using Gemini 1.5 Pro.
    """
    embeddings = GoogleGenerativeAIEmbeddings(model="models/embedding-001")
    vectorstore = Chroma.from_documents(documents=doc_chunks, embedding=embeddings)

    # Top-2 nearest chunks keep the context small for the 250-token answer budget.
    retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 2})

    system_prompt = """
    You are an AI assistant for question-answering tasks about the Bharatiya Nyaya Sanhita.Bharatiya Nyaya Sanhita came into effect on 1 July, 2024 after being passed by the parliament in December 2023 to replace the Indian Penal Code (IPC), which dated back to the period of British India
    Analyze the provided context and answer the user's question concisely. Follow these guidelines:

    1. Use only the given context to formulate your response.
    2. If the answer cannot be derived from the context, state "I don't have enough information to answer this question."
    3. Provide a clear, direct answer in seven sentences or fewer.

    Context:
    {context}
    """

    chat_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", system_prompt),
            ("human", "{input}"),
        ]
    )

    llm = ChatGoogleGenerativeAI(model="gemini-1.5-pro", temperature=0.3, max_tokens=250)

    answer_chain = create_stuff_documents_chain(llm, chat_prompt)
    return create_retrieval_chain(retriever, answer_chain)
rag_initializer.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from document_processor import read_documents, chunk_data
3
+ from embeddings_store import create_embeddings_and_store
4
+
5
@st.cache_resource
def initialize_rag_chain():
    """Build the RAG chain from bns3.pdf, cached across Streamlit reruns.

    Returns the retrieval chain, or None if any stage fails (the error is
    surfaced to the UI via st.error rather than raised).
    """
    try:
        documents = read_documents("bns3.pdf")
        chunks = chunk_data(documents)
        return create_embeddings_and_store(chunks)
    except Exception as exc:
        st.error(f"Error initializing RAG chain: {str(exc)}")
        return None
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ streamlit
2
+ python-dotenv
3
+ langchain
4
+ langchain-google-genai
5
+ langchain-chroma
6
+ google-generativeai
7
+ langchain-community
8
+ langchain-experimental
9
+ pypdf