Update app.py
app.py CHANGED
@@ -5,38 +5,38 @@ from langchain.llms import HuggingFacePipeline
 from langchain.chains import RetrievalQA
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import json
-import os
-data = json.load(f)
-model = AutoModelForCausalLM.from_pretrained("NousResearch/Llama-2-7b-chat-hf")
-text_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
-llm = HuggingFacePipeline(pipeline=text_pipeline)
-if query:
+
+# Streamlit UI
+st.title("Indian Constitution Q&A RAG App")
+
+# Upload JSON File
+uploaded_file = st.file_uploader("Upload Constitution JSON", type="json")
+
+if uploaded_file is not None:
+    # Load JSON dataset
+    data = json.load(uploaded_file)
+
+    # Extract questions and answers
+    texts = [f"Q: {item['question']}\nA: {item['answer']}" for item in data]
+
+    # Load the embedding model
+    embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
+
+    # Create FAISS vector database
+    vector_db = FAISS.from_texts(texts, embeddings)
+
+    # Load Open-Source LLM (LLaMA-2 7B Open Chat Model)
+    tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-chat-hf")
+    model = AutoModelForCausalLM.from_pretrained("NousResearch/Llama-2-7b-chat-hf")
+    text_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
+    llm = HuggingFacePipeline(pipeline=text_pipeline)
+
+    # Create RAG pipeline
+    qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vector_db.as_retriever())
+
+    query = st.text_input("Enter your legal query:")
+
+    if query:
+        response = qa_chain.run(query)
+        st.write("### AI-Generated Answer:")
+        st.write(response)
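The hunk begins at line 5, so the names the added code relies on (st, HuggingFaceEmbeddings, FAISS, HuggingFacePipeline) must be imported in the unchanged lines 1-4; the hunk header confirms only that line 4 is `from langchain.llms import HuggingFacePipeline`. A plausible sketch of that preamble, assuming the classic pre-langchain_community import paths (lines 1-3 are not shown in the diff, so these are assumptions):

import streamlit as st  # assumed: st.title and st.file_uploader are called below
from langchain.embeddings import HuggingFaceEmbeddings  # assumed import path
from langchain.vectorstores import FAISS  # assumed import path
from langchain.llms import HuggingFacePipeline  # confirmed by the hunk header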
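The list comprehension in the added code implies the uploaded JSON must be a flat array of objects with question and answer keys. A minimal sketch of a compatible file and the strings handed to the vector store (the sample record is illustrative, not taken from the Space's actual dataset):

import json

# Illustrative record only; the real dataset contents are not shown in the diff.
sample = '[{"question": "What does Article 14 guarantee?", "answer": "Equality before the law."}]'

data = json.loads(sample)
texts = [f"Q: {item['question']}\nA: {item['answer']}" for item in data]
print(texts[0])
# Q: What does Article 14 guarantee?
# A: Equality before the law.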
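One side effect of moving everything under `if uploaded_file is not None:` is that Streamlit re-executes the whole script on every widget interaction, so the 7B checkpoint is reloaded each time a query is submitted. A common mitigation, not part of this commit, is to wrap the expensive loads in st.cache_resource; a sketch using the same model name as the diff:

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain.llms import HuggingFacePipeline

@st.cache_resource  # cached across Streamlit reruns; loads the model once per process
def load_llm(model_id="NousResearch/Llama-2-7b-chat-hf"):
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    text_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return HuggingFacePipeline(pipeline=text_pipeline)

llm = load_llm()  # subsequent reruns reuse the cached instance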