import gradio as gr
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores.faiss import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain_community.llms import HuggingFaceHub
from langchain.docstore.document import Document

# Load the PDF document
loader = PyPDFLoader("apexcustoms.pdf")
data = loader.load()

# Split the document into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
texts = text_splitter.split_documents(data)
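# Illustrative figures (not taken from the PDF): a ~2,000-character page yields
# roughly four 500-character chunks, with 20 characters repeated across each
# boundary so sentences split at a chunk edge stay retrievable.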

# Create a list of document objects from the texts
documents = [Document(page_content=doc.page_content) for doc in texts]

# Create a vector store; FAISS.from_documents expects a LangChain embeddings
# object that can embed both the stored documents and later queries
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
vector_store = FAISS.from_documents(documents, embeddings)
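
# Optional sanity check with a hypothetical query (sketch only): inspect which
# chunks the store considers closest before wiring up the full chain.
# top_chunks = vector_store.similarity_search("matte black wrap options", k=3)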

# Initialize the HuggingFaceHub LLM; these sampling defaults match the slider
# defaults below and are overwritten per request in respond()
llm = HuggingFaceHub(repo_id="HuggingFaceH4/zephyr-7b-beta", model_kwargs={"temperature": 0.7, "top_p": 0.95})

# Initialize the RetrievalQA chain
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vector_store.as_retriever())
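
# Optional smoke test with a hypothetical question (not part of the app flow):
# print(qa({"query": "What paint colors does Apex Customs offer?"})["result"])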

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Update the sampling parameters for the LLM
    llm.model_kwargs["temperature"] = temperature
    llm.model_kwargs["top_p"] = top_p
    llm.model_kwargs["max_new_tokens"] = max_tokens

    # The "stuff" RetrievalQA chain takes a single question string, so fold the
    # system message and prior turns into the query text
    conversation = [system_message]
    for user_msg, assistant_msg in history:
        if user_msg:
            conversation.append(f"User: {user_msg}")
        if assistant_msg:
            conversation.append(f"Assistant: {assistant_msg}")
    conversation.append(f"User: {message}")

    # Let the retriever pull in the relevant chunks; RetrievalQA expects the
    # input under the "query" key
    result = qa({"query": "\n".join(conversation)})

    # ChatInterface manages the history itself, so return only the reply text
    return result["result"]

"""
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
"""
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful car configuration assistant for Apex Customs (https://www.apexcustoms.com/). Given the user's input, suggest car models, colors, and customization options. Be creative and conversational. Remember the user's car model and tailor your answers accordingly. Answer only; do not generate the user's next question yourself.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()