Pijush2023 committed
Commit 79a2714 · verified · 1 Parent(s): 8a9de56

Update app.py

Files changed (1)
  1. app.py +10 -9
app.py CHANGED
@@ -357,15 +357,16 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
         response = fetch_google_flights()
         return response, extract_addresses(response)
 
-    prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
-
-    if retrieval_mode == "VDB":
-        # Retrieve context from the vector database
+    # Use a simple, direct prompt for Phi-3.5
+    if selected_model == phi_pipe:
+        prompt = f"Here is the information : , {message}"
+    else:
+        # Use the existing prompt templates for GPT-4o
+        prompt_template = QA_CHAIN_PROMPT_1 if choice == "Details" else QA_CHAIN_PROMPT_2
         context = retriever.get_relevant_documents(message)
-
-        # Format the prompt
         prompt = prompt_template.format(context=context, question=message)
 
+    if retrieval_mode == "VDB":
         if selected_model == chat_model:
             # Use GPT-4o with Langchain
             qa_chain = RetrievalQA.from_chain_type(
@@ -380,10 +381,10 @@ def generate_answer(message, choice, retrieval_mode, selected_model):
         elif selected_model == phi_pipe:
             # Use Phi-3.5 directly with the formatted prompt
             response = selected_model(prompt, **{
-                "max_new_tokens": 300,  # Limit the tokens for faster generation
+                "max_new_tokens": 300,
                 "return_full_text": False,
-                "temperature": 0.5,  # Adjust temperature for more consistent answers
-                "do_sample": True,
+                "temperature": 0.5,
+                "do_sample": False,
             })[0]['generated_text']
             return response, extract_addresses(response)
 
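For reference, the net effect on the Phi-3.5 path is easiest to see in isolation. The sketch below is a minimal, self-contained reconstruction of that path after this commit, not the full app.py: the pipeline construction and the "microsoft/Phi-3.5-mini-instruct" checkpoint name are assumptions standing in for however app.py actually builds phi_pipe, and answer_with_phi is a hypothetical wrapper name.

# Minimal sketch of the post-commit Phi-3.5 path in generate_answer().
# Assumptions: phi_pipe is a standard transformers text-generation pipeline;
# the model checkpoint below is a placeholder, not confirmed by the diff.
from transformers import pipeline

phi_pipe = pipeline("text-generation", model="microsoft/Phi-3.5-mini-instruct")

def answer_with_phi(message: str) -> str:
    # The commit bypasses the QA prompt templates for Phi-3.5 and passes
    # the user message through a bare f-string instead.
    prompt = f"Here is the information : , {message}"
    return phi_pipe(
        prompt,
        max_new_tokens=300,      # cap on generated tokens
        return_full_text=False,  # return only the completion, not the prompt
        temperature=0.5,         # ignored under greedy decoding
        do_sample=False,         # greedy decoding: deterministic output
    )[0]["generated_text"]

Two consequences of the change are worth noting. With do_sample=False the pipeline decodes greedily, so the temperature kwarg no longer affects the output; the switch from do_sample=True is what makes Phi-3.5 answers deterministic. And because the Phi-3.5 prompt is now built from the bare message before any retrieval happens, that branch no longer incorporates context from the vector database at all.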