Initial Draft
app.py CHANGED

@@ -1,8 +1,6 @@
 import streamlit as st
 import os
 import requests
-import time
-import sys
 
 from langchain_community.document_loaders import PyPDFLoader
 from langchain.text_splitter import CharacterTextSplitter
@@ -10,7 +8,7 @@ from langchain_community.embeddings import HuggingFaceEmbeddings
 from langchain.vectorstores.faiss import FAISS
 
 from langchain.prompts.prompt import PromptTemplate
-from langchain_community.llms import
+from langchain_community.llms import LlamaCpp
 from langchain.chains import RetrievalQA
 
 # Upload pdf file into 'pdf-data' folder if it does not exist
@@ -40,10 +38,10 @@ def fn_create_vector_db(mv_pdf_input_file, mv_processing_message):
 
     lv_file_name = mv_pdf_input_file.name[:-4] + ".vectorstore"
 
-    if not os.path.exists("vectordb
-        os.makedirs("vectordb
+    if not os.path.exists(os.path.join("vectordb","fiaas")):
+        os.makedirs(os.path.join("vectordb","fiaas"))
 
-    lv_temp_file_path = os.path.join("vectordb
+    lv_temp_file_path = os.path.join(os.path.join("vectordb","fiaas"),lv_file_name)
     lv_embeddings = HuggingFaceEmbeddings(
         model_name="sentence-transformers/all-mpnet-base-v2",
         model_kwargs={'device': 'cpu'}
@@ -111,6 +109,8 @@ def fn_download_llm_models(mv_selected_model, mv_processing_message):
 
     if mv_selected_model == 'microsoft/phi-2':
         lv_download_url = "https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q2_K.gguf"
+    elif mv_selected_model == 'google/gemma-2b':
+        lv_download_url = "https://huggingface.co/MaziyarPanahi/gemma-2b-it-GGUF/resolve/main/gemma-2b-it.Q2_K.gguf"
     elif mv_selected_model == 'mistralai/Mistral-7B-Instruct-v0.2':
         lv_download_url = "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q2_K.gguf"
 
@@ -160,21 +160,24 @@ def fn_generate_QnA_response(mv_selected_model, mv_user_question, lv_vector_store
     if mv_selected_model == 'microsoft/phi-2':
         lv_model_path = "model/phi-2.Q2_K.gguf"
         lv_model_type = "pi"
+    elif mv_selected_model == 'google/gemma-2b':
+        lv_model_path = "model/gemma-2b-it.Q2_K.gguf"
+        lv_model_type = "gemma"
     elif mv_selected_model == 'mistralai/Mistral-7B-Instruct-v0.2':
         lv_model_path = "model/mistral-7b-instruct-v0.2.Q2_K.gguf"
         lv_model_type = "mistral"
-
-    print("Model Absolute location -" +lv_model_path)
+
 
     print("Step4: Generating LLM response")
     fn_display_user_messages("Step4: Generating LLM response","Info", mv_processing_message)
 
-    lv_model =
-
-
-
-
-
+    lv_model = LlamaCpp(
+        model_path=lv_model_path,
+        temperature=0.75,
+        max_tokens=2048,
+        top_p=1,
+        verbose=False
+    )
     lv_retriever = lv_vector_store.as_retriever(search_kwargs={'k': 2})
     lv_qa_chain = RetrievalQA.from_chain_type( llm=lv_model,
                                                chain_type='stuff',
@@ -208,6 +211,7 @@ def main():
     mv_selected_model = col3.selectbox('Select Model',
                                        [
                                         'microsoft/phi-2',
+                                        'google/gemma-2b',
                                         'mistralai/Mistral-7B-Instruct-v0.2'
                                        ]
                                       )
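
For reference, the wiring this commit lands can be exercised outside Streamlit. The sketch below is a minimal reconstruction under stated assumptions: the GGUF file has already been fetched into model/ (as fn_download_llm_models does), a FAISS store for some PDF was previously saved under vectordb/fiaas (folder name spelled as in the commit) by fn_create_vector_db, and "demo.vectorstore" is a hypothetical file name standing in for the real <pdf-name>.vectorstore. Newer langchain releases may additionally require allow_dangerous_deserialization=True on FAISS.load_local.

import os

from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import LlamaCpp
from langchain.vectorstores.faiss import FAISS
from langchain.chains import RetrievalQA

# Same embedding model the app uses when it builds the vectorstore.
lv_embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-mpnet-base-v2",
    model_kwargs={'device': 'cpu'}
)

# Hypothetical store name; the app derives it from the uploaded PDF's file name.
lv_vector_store = FAISS.load_local(
    os.path.join("vectordb", "fiaas", "demo.vectorstore"),
    lv_embeddings
)

# Constructor arguments mirror the LlamaCpp call added in this commit.
lv_model = LlamaCpp(
    model_path="model/phi-2.Q2_K.gguf",  # phi-2 branch; gemma/mistral differ only in path
    temperature=0.75,
    max_tokens=2048,
    top_p=1,
    verbose=False
)

# 'stuff' chain type: the k retrieved chunks are stuffed into a single prompt.
lv_qa_chain = RetrievalQA.from_chain_type(
    llm=lv_model,
    chain_type='stuff',
    retriever=lv_vector_store.as_retriever(search_kwargs={'k': 2})
)

print(lv_qa_chain.invoke({"query": "What is this PDF about?"}))

One side note on the diff itself: lv_model_type ("pi", "gemma", "mistral") is still assigned in fn_generate_QnA_response, but LlamaCpp takes no model-type argument, so after this change the variable is unused; it presumably fed the loader that the truncated "from langchain_community.llms import" line used to bring in.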