Moha782 committed on
Commit 34e179e · verified · 1 Parent(s): 6c6bd03

Update app.py

Files changed (1)
  1. app.py +18 -13
app.py CHANGED
@@ -2,7 +2,6 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 from langchain_community.vectorstores.faiss import FAISS
 from langchain.chains import RetrievalQA
-from langchain_huggingface import HuggingFacePipeline
 
 # Load the vector store from the saved index files
 vector_store = FAISS.load_local("db.index", embeddings=None, allow_dangerous_deserialization=True)
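
The dropped `langchain_huggingface` import matches the removal of the `HuggingFacePipeline` wrapper in the next hunk. Separately, note that `FAISS.load_local` is called with `embeddings=None` on both sides of this diff: the index loads, but the store then has no way to embed incoming queries, so retrieval fails at query time. A minimal sketch of the load with a real embeddings object (the model name below is an assumption for illustration; it must be whatever model originally built `db.index`):

```python
# Sketch, not the committed code: load_local needs the same embeddings
# object that was used to build db.index, or similarity_search cannot
# embed incoming queries.
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores.faiss import FAISS

# Assumed model name, shown only for illustration.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
vector_store = FAISS.load_local(
    "db.index",
    embeddings,
    allow_dangerous_deserialization=True,
)
```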
@@ -10,17 +9,17 @@ vector_store = FAISS.load_local("db.index", embeddings=None, allow_dangerous_des
 # Load the model using InferenceClient
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
-# Initialize the HuggingFacePipeline LLM
-llm = HuggingFacePipeline(client=client, model_kwargs={"temperature": None, "top_p": None})
-
 # Initialize the RetrievalQA chain
-qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=vector_store.as_retriever())
-
-def respond(message, history, system_message, max_tokens, temperature, top_p):
-    # Update the temperature and top_p values for the LLM
-    llm.model_kwargs["temperature"] = temperature
-    llm.model_kwargs["top_p"] = top_p
-
+qa = RetrievalQA.from_chain_type(client=client, chain_type="stuff", retriever=vector_store.as_retriever())
+
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
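
The old code misused `HuggingFacePipeline`, which wraps a local `transformers` pipeline rather than an `InferenceClient` (it has no `client` argument), but the new line does not fix the underlying problem: `RetrievalQA.from_chain_type` expects a LangChain LLM via `llm=` and has no `client` parameter, so this call still fails at startup. One way to keep the hosted zephyr model while handing the chain a real LangChain LLM is the `HuggingFaceEndpoint` wrapper from `langchain_huggingface`; the sketch below assumes that wrapper and uses illustrative sampling defaults mirroring the UI sliders:

```python
# Sketch of one possible fix, not the committed code: give RetrievalQA
# a real LangChain LLM that targets the same hosted model.
from langchain.chains import RetrievalQA
from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="HuggingFaceH4/zephyr-7b-beta",  # same model the InferenceClient targets
    max_new_tokens=512,  # illustrative defaults, mirroring the slider values
    temperature=0.7,
    top_p=0.95,
)
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vector_store.as_retriever(),
)
```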
@@ -43,10 +42,16 @@ For information on how to customize the ChatInterface, peruse the gradio docs: h
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a helpful car configuration assistant, specifically you are the assistant for Apex Customs (https://www.apexcustoms.com/). Given the user's input, provide suggestions for car models, colors, and customization options. Be creative and conversational in your responses. You should remember the user car model and tailor your answers accordingly. (You must not generate the next question of the user yourself, you only have to answer.) \n\nUser: ", label="System message"),
+        gr.Textbox(value="You are a helpful car configuration assistant, specifically you are the assistant for Apex Customs (https://www.apexcustoms.com/). Given the user's input, provide suggestions for car models, colors, and customization options. Be creative and conversational in your responses. You should remember the user car model and tailor your answers accordingly. \n\nUser: ", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
     ],
 )
 
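
On the new side, the Textbox default drops the "(You must not generate the next question of the user yourself, you only have to answer.)" instruction from the system prompt, and the Top-p slider is only reflowed across multiple lines, with no change in behavior. Since the earlier hunk removed the `llm.model_kwargs` mutation, the slider values must now reach the model inside `respond` itself. The diff does not show that part of the body; the sketch below assumes the wiring used by the stock `gr.ChatInterface` template this app is based on, forwarding the values to `InferenceClient.chat_completion`:

```python
# Sketch of a respond body consistent with the signature shown above;
# the committed diff does not include these lines.
def respond(message, history, system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:  # history: list[tuple[str, str]]
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    messages.append({"role": "user", "content": message})

    response = ""
    # Stream tokens so gr.ChatInterface renders the reply incrementally.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        stream=True,
    ):
        response += chunk.choices[0].delta.content or ""
        yield response
```

Note that `qa`, the RetrievalQA chain, is never referenced in the hunks shown here, so as far as this diff shows, the retrieved FAISS context never reaches the model.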
 
 