kumaranJaisankar committed on
Commit
2480805
·
verified ·
1 Parent(s): a8268c7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -66,7 +66,7 @@ prompt = PromptTemplate(
66
  # Define the ChatOpenAI model
67
  llm = ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo")
68
 
69
- memory = ConversationBufferMemory()
70
  # Create a RunnableSequence
71
  runnable_sequence = RunnableSequence(prompt | llm)
72
 
@@ -75,13 +75,13 @@ runnable_sequence = RunnableSequence(prompt | llm)
75
  # Define your get_text_response function
76
  def get_text_response(user_message, history):
77
  # Add user message to the memory (for example purposes, assume `add_user_message` is the correct method)
78
- memory.save_context({"role": "user", "content": user_message}, None)
79
 
80
  # Use the RunnableSequence to generate a response
81
  response = runnable_sequence.run(user_message=user_message)
82
 
83
  # Add LLM response to the memory (for example purposes, assume `add_ai_message` is the correct method)
84
- memory.save_context(None, {"role": "assistant", "content": response})
85
 
86
  return response
87
 
 
# Define the ChatOpenAI model.
llm = ChatOpenAI(temperature=0.5, model_name="gpt-3.5-turbo")

# Conversation memory; memory_key matches the prompt's history variable.
memory = ConversationBufferMemory(memory_key="chat_history")

# Create a RunnableSequence: the prompt piped into the LLM (LCEL style).
runnable_sequence = RunnableSequence(prompt | llm)


def get_text_response(user_message, history):
    """Generate a reply to *user_message* and record the turn in memory.

    Args:
        user_message: The latest user input, passed to the prompt as
            its ``user_message`` variable.
        history: Chat history supplied by the UI framework (unused here;
            memory is tracked by ``ConversationBufferMemory`` instead).

    Returns:
        The model's response for this turn.
    """
    # RunnableSequence exposes .invoke(), not .run(); prompt variables
    # are passed as a single dict.
    response = runnable_sequence.invoke({"user_message": user_message})

    # save_context requires BOTH arguments to be dicts -- passing None
    # (as the original code did) raises a TypeError.  Record the whole
    # turn in one call once the response is available.
    memory.save_context({"input": user_message}, {"output": str(response)})

    return response
87