Update app.py
app.py CHANGED
@@ -1,17 +1,20 @@
 import streamlit as st
 import os
-from
+from langchain.llms import HuggingFacePipeline
 from langchain_community.document_loaders import PDFPlumberLoader
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_core.vectorstores import InMemoryVectorStore
 from langchain_core.prompts import ChatPromptTemplate
 from langchain.embeddings import HuggingFaceEmbeddings
+from transformers import pipeline


 # Set up Hugging Face model and token
-
+model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1" # Change to your preferred model
 access_token = os.getenv("HF_TOKEN") # Your Hugging Face API token
-
+
+# Set up HuggingFace pipeline
+hf_pipeline = pipeline("text-generation", model=model_name, token=access_token)

 # Template for response generation
 template = """
@@ -26,7 +29,7 @@ pdfs_directory = '../pdfs'
 os.makedirs(pdfs_directory, exist_ok=True)

 # Initialize the embedding model
-embedding = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") #
+embedding = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2") # Choose any model

 # Initialize the vector store for document indexing
 vector_store = InMemoryVectorStore(embedding=embedding)
@@ -65,14 +68,11 @@ def answer_question(question, documents):
     full_context = f"{context}"
     prompt = ChatPromptTemplate.from_template(template)

-    # Use
-
-
-    # Use the client (InferenceClient) to get a response
-    response = client.query(question_with_context)
-
-    return response["generated_text"] # Assuming the response is in "generated_text"
+    # Use HuggingFacePipeline for generating responses
+    hf_chain = HuggingFacePipeline(pipeline=hf_pipeline) # Wrap pipeline with HuggingFacePipeline
+    chain = prompt | hf_chain # Send the prompt to Hugging Face model via HuggingFacePipeline

+    return chain.invoke({"question": question, "context": full_context})

 # Streamlit file uploader for PDF
 uploaded_file = st.file_uploader(
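For reference, a minimal standalone sketch of the pipeline-based chain this commit introduces. The template text, sample inputs, and the small placeholder model below are assumptions for illustration only; the commit itself targets mistralai/Mixtral-8x7B-Instruct-v0.1 and the template already defined in app.py.

import os
from transformers import pipeline
from langchain.llms import HuggingFacePipeline
from langchain_core.prompts import ChatPromptTemplate

# Placeholder prompt mirroring the question/context shape used in app.py.
template = """
Answer the question using only the provided context.
Question: {question}
Context: {context}
Answer:
"""

# Placeholder model kept small so the sketch is cheap to run locally;
# the commit itself loads mistralai/Mixtral-8x7B-Instruct-v0.1.
hf_pipeline = pipeline("text-generation", model="distilgpt2", token=os.getenv("HF_TOKEN"))

# Wrap the transformers pipeline so it can be composed with the prompt via LCEL.
hf_chain = HuggingFacePipeline(pipeline=hf_pipeline)
chain = ChatPromptTemplate.from_template(template) | hf_chain

# Sample invocation with placeholder values for question and context.
print(chain.invoke({"question": "What is this document about?", "context": "A short sample context."}))

Note that in the committed app.py the heavy pipeline(...) load happens once at module import; only the cheap HuggingFacePipeline wrapper and the prompt | llm composition are rebuilt inside answer_question.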