Update app.py
app.py
CHANGED
@@ -10,15 +10,25 @@ from langchain.vectorstores import FAISS
 from langchain.chains.question_answering import load_qa_chain
 from langchain.prompts import PromptTemplate
 import whisper
-
+import requests
 from dotenv import load_dotenv
 
-# Step 2: Load environment
+# Step 2: Load environment variables
 load_dotenv()
 groq_api_key = os.getenv("GROQ_API_KEY")
 
-# Step 3:
-
+# Step 3: Custom function to interact with the Groq API
+def get_groq_embeddings(text_chunks):
+    url = "https://api.groq.com/your-endpoint"  # Replace with the correct Groq API endpoint
+    headers = {"Authorization": f"Bearer {groq_api_key}"}
+    payload = {"text_chunks": text_chunks}
+    response = requests.post(url, json=payload, headers=headers)
+
+    if response.status_code == 200:
+        return response.json()["embeddings"]
+    else:
+        st.error(f"Error: {response.status_code} - {response.text}")
+        return None
 
 # Step 4: Function to read files and extract text
 def extract_text(file):
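A note on the new helper: the endpoint URL is an acknowledged placeholder, and the request/response shape (a "text_chunks" list in, an "embeddings" list out) is an assumption of this commit rather than a documented Groq API contract, so both are worth verifying. A minimal usage sketch showing only the intended call and return shape:

# Hypothetical usage of get_groq_embeddings; the endpoint above is a
# placeholder, so this only illustrates the intended call/return shape.
chunks = ["First chunk of the document.", "Second chunk of the document."]
vectors = get_groq_embeddings(chunks)  # expected: one float vector per chunk, or None on error
if vectors is not None:
    print(f"{len(vectors)} embeddings of dimension {len(vectors[0])}")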
@@ -59,15 +69,18 @@ def get_text_chunks(text):
 
 # Step 6: Function for converting chunks into embeddings and saving the FAISS index
 def get_vector_store(text_chunks):
-    embeddings =
-
-
-    # Ensure the directory exists
-    if not os.path.exists("faiss_index"):
-        os.makedirs("faiss_index")
+    embeddings = get_groq_embeddings(text_chunks)
+    if embeddings:
+        vector_store = FAISS.from_texts(text_chunks, embedding=embeddings)
 
-
-
+        # Ensure the directory exists
+        if not os.path.exists("faiss_index"):
+            os.makedirs("faiss_index")
+
+        vector_store.save_local("faiss_index")
+        print("FAISS index saved successfully.")
+    else:
+        st.error("Failed to retrieve embeddings from Groq API.")
 
 # Step 7: Function to implement the Groq Model
 def get_conversational_chain():
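One caveat worth flagging on the new get_vector_store: in LangChain, FAISS.from_texts expects an Embeddings object (something exposing embed_documents and embed_query), not the raw vector list that get_groq_embeddings returns. A minimal adapter sketch, assuming get_groq_embeddings returns one vector per input text (the class name is hypothetical):

from langchain.embeddings.base import Embeddings

class GroqEmbeddings(Embeddings):
    # Hypothetical adapter: exposes the raw-vector helper through the
    # interface that FAISS.from_texts and FAISS.load_local expect.
    def embed_documents(self, texts):
        return get_groq_embeddings(texts)

    def embed_query(self, text):
        return get_groq_embeddings([text])[0]

With an adapter like this, FAISS.from_texts(text_chunks, embedding=GroqEmbeddings()) lets FAISS call back into the Groq helper both at index time and at query time.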
@@ -76,22 +89,26 @@ def get_conversational_chain():
     the provided context, just say, "The answer is not available in the context." Do not provide a wrong answer.\n\n
     Context:\n {context}\n
     Question: \n{question}\n
-
     Answer:
     """
-
+    # Assuming we use the Groq API for the model as well
+    # Replace with your Groq model call or other LLM API
+    model = get_groq_embeddings  # Placeholder for the actual model call
     prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
     chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
     return chain
 
 # Step 8: Function to take inputs from user and generate response
 def user_input(user_question):
-    embeddings =
-
-
-
-
-
+    embeddings = get_groq_embeddings([user_question])
+    if embeddings:
+        new_db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
+        docs = new_db.similarity_search(user_question)
+        chain = get_conversational_chain()
+        response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
+        return response["output_text"]
+    else:
+        return "Failed to retrieve response from Groq API."
 
 # Step 9: Streamlit App
 def main():
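The placeholder model = get_groq_embeddings will fail at runtime: load_qa_chain expects an LLM or chat model, not an embeddings helper. If the separate langchain-groq package is installed, one way to fill the placeholder (the model name is only an example):

from langchain_groq import ChatGroq

# Sketch: an actual chat model for load_qa_chain; substitute any model
# available on your Groq account.
model = ChatGroq(model_name="llama3-8b-8192", groq_api_key=groq_api_key, temperature=0)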
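A similar interface mismatch affects user_input: FAISS.load_local takes an Embeddings object as its second argument, and similarity_search then embeds the question internally, so passing a precomputed query vector will not work. A hedged rewrite using the GroqEmbeddings adapter sketched above:

def user_input(user_question):
    # GroqEmbeddings is the hypothetical adapter sketched earlier.
    embedding = GroqEmbeddings()
    new_db = FAISS.load_local("faiss_index", embedding, allow_dangerous_deserialization=True)
    docs = new_db.similarity_search(user_question)
    chain = get_conversational_chain()
    response = chain({"input_documents": docs, "question": user_question}, return_only_outputs=True)
    return response["output_text"]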