Update app.py
app.py CHANGED
@@ -307,8 +307,16 @@ def retry_last_response(history, use_web_search, model, temperature, num_calls):
 
     return chatbot_interface(last_user_msg, history, use_web_search, model, temperature, num_calls)
 
-def create_conversation_chain(model_name, vector_store):
-
+def create_conversation_chain(model_name, vector_store, temperature=0.5):
+    # Create a HuggingFaceHub instance
+    llm = HuggingFaceHub(
+        repo_id=model_name,
+        model_kwargs={
+            "temperature": temperature
+        },
+        huggingfacehub_api_token=huggingface_token  # Ensure this variable is defined in your environment
+    )
+
     memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
 
     chain = ConversationalRetrievalChain.from_llm(
@@ -347,7 +355,7 @@ def respond(message, history, model, temperature, num_calls, use_web_search, sel
     filtered_db = FAISS.from_documents(filtered_docs, embed)
 
     # Create the conversation chain
-    chain = create_conversation_chain(model, filtered_db)
+    chain = create_conversation_chain(model, filtered_db, temperature)
 
     # Generate response
     response = chain({"question": message})
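For reference, below is a minimal sketch of how the revised helper and its call site fit together, assuming the classic langchain package layout (HuggingFaceHub, ConversationBufferMemory, ConversationalRetrievalChain) and an API token read from an environment variable. The diff cuts off inside ConversationalRetrievalChain.from_llm(, so the retriever and memory arguments shown here are assumptions rather than the file's actual wiring.

import os

from langchain.llms import HuggingFaceHub
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain

# Assumed: the Space exposes its Hugging Face API token via an environment variable
huggingface_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")

def create_conversation_chain(model_name, vector_store, temperature=0.5):
    # Remote Hugging Face Inference API model; the UI-selected temperature
    # is forwarded through model_kwargs
    llm = HuggingFaceHub(
        repo_id=model_name,
        model_kwargs={"temperature": temperature},
        huggingfacehub_api_token=huggingface_token,
    )

    # Keep the running chat history so follow-up questions have context
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

    # Assumed wiring: the diff truncates before these arguments are shown
    chain = ConversationalRetrievalChain.from_llm(
        llm=llm,
        retriever=vector_store.as_retriever(),
        memory=memory,
    )
    return chain

# Call site, as in the second hunk: the same temperature now reaches the LLM
# chain = create_conversation_chain(model, filtered_db, temperature)
# response = chain({"question": message})

The net effect of the change is that the temperature chosen in the app's UI is passed through respond into the HuggingFaceHub model, instead of the chain being built without an explicit LLM configuration.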