import gradio as gr
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores.faiss import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain_community.llms import HuggingFaceHub
from langchain.docstore.document import Document
# Load the PDF document
loader = PyPDFLoader("apexcustoms.pdf")
data = loader.load()
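# PyPDFLoader yields one Document per page, with the page number in metadata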
# Split the document into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
texts = text_splitter.split_documents(data)
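# chunk_size and chunk_overlap are measured in characters here; the small
# overlap preserves context for sentences that straddle chunk boundaries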
# Rebuild plain Document objects from the chunks
# (split_documents already returns Documents; this pass just drops the metadata)
documents = [Document(page_content=doc.page_content) for doc in texts]
# Create a vector store. FAISS.from_documents expects a LangChain Embeddings
# object rather than pre-computed vectors, so wrap the sentence-transformers
# model in HuggingFaceEmbeddings instead of encoding the texts by hand.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
vector_store = FAISS.from_documents(documents, embeddings)
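# The FAISS index lives in memory and is rebuilt on every app start; if
# startup time matters, persist it once with vector_store.save_local(...)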
# Initialize the HuggingFaceHub LLM. None is not a valid sampling value, so
# start from the same defaults the UI sliders use.
llm = HuggingFaceHub(repo_id="HuggingFaceH4/zephyr-7b-beta", model_kwargs={"temperature": 0.7, "top_p": 0.95, "max_new_tokens": 512})
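# HuggingFaceHub calls the hosted Inference API and needs a token, passed as
# huggingfacehub_api_token or via the HUGGINGFACEHUB_API_TOKEN env variable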
# Initialize the RetrievalQA chain
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vector_store.as_retriever())
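# chain_type="stuff" concatenates the retrieved chunks into a single prompt,
# which works as long as the chunks fit in the model's context window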
def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Update the sampling parameters for the LLM on each request
    llm.model_kwargs["temperature"] = temperature
    llm.model_kwargs["top_p"] = top_p
    llm.model_kwargs["max_new_tokens"] = max_tokens

    # RetrievalQA is stateless and fetches its own context through the
    # retriever, so it takes a single "query" key; passing the raw documents
    # under an "input_documents"/"question" pair is a validation error.
    # Prepend the system message so the model still sees its instructions
    # (the chain has no separate system-prompt slot, and it ignores the chat
    # history that gr.ChatInterface passes in).
    result = qa({"query": f"{system_message}\n\n{message}"})

    # gr.ChatInterface manages the history itself, so return only the reply
    return result["result"]
""" | |
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
""" | |
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful car configuration assistant, specifically you are the assistant for Apex Customs (https://www.apexcustoms.com/). Given the user's input, provide suggestions for car models, colors, and customization options. Be creative and conversational in your responses. You should remember the user's car model and tailor your answers accordingly. (You must not generate the user's next question yourself; you only have to answer.) \n\nUser: ", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
if __name__ == "__main__":
    demo.launch()
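# Assumed dependencies (the original Space does not pin any, so this list is
# a best guess for the imports above):
#   pip install gradio langchain langchain-community huggingface_hub sentence-transformers faiss-cpu pypdf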