kumaranJaisankar commited on
Commit
e70827c
·
verified ·
1 Parent(s): 2480805

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -4
app.py CHANGED
@@ -72,19 +72,25 @@ runnable_sequence = RunnableSequence(prompt | llm)
72
 
73
  # Initialize memory
74
 
75
# Define your get_text_response function
def get_text_response(user_message, history):
    """Generate an LLM reply for *user_message* and record the turn in memory.

    Parameters
    ----------
    user_message : str
        The incoming user message.
    history : object
        Conversation history supplied by the chat UI (e.g. Gradio); unused
        here because turn tracking is delegated to the `memory` object.

    Returns
    -------
    The chain's output for this turn (presumably text — confirm against the
    configured `runnable_sequence`).
    """
    # Run the chain first so the whole exchange can be saved in one call.
    # NOTE: RunnableSequence has no `.run()` method — `.invoke()` is the
    # LCEL entry point, and it takes a single dict of input variables.
    response = runnable_sequence.invoke({"user_message": user_message})

    # `save_context(inputs, outputs)` requires dicts for BOTH arguments;
    # passing None raises. Store the full turn (user message + reply) in
    # a single call instead of two half-broken ones.
    memory.save_context(
        {"user_message": user_message},
        {"chat_history": response},
    )

    return response
87
-
88
  # Example usage with Gradio
89
  theme = "default" # or your custom theme
90
 
 
72
 
73
  # Initialize memory
74
 
 
75
def get_text_response(user_message, history):
    """Validate the input, generate an LLM reply, and record the turn in memory.

    Parameters
    ----------
    user_message : str
        The incoming user message; rejected if not a string.
    history : object
        Conversation history supplied by the chat UI (e.g. Gradio); unused
        here because turn tracking is delegated to the `memory` object.

    Returns
    -------
    str
        The chain's response text (enforced by the isinstance check below).

    Raises
    ------
    ValueError
        If *user_message* or the chain's response is not a string.
    """
    # Ensure user_message is a string
    if not isinstance(user_message, str):
        raise ValueError("user_message must be a string")

    # Use the RunnableSequence to generate a response.
    # NOTE: RunnableSequence has no `.run()` method — `.invoke()` is the
    # LCEL entry point, and it takes a single dict of input variables.
    response = runnable_sequence.invoke({"user_message": user_message})

    # Ensure response is a string
    if not isinstance(response, str):
        raise ValueError("Response must be a string")

    # `save_context(inputs, outputs)` requires dicts for BOTH arguments;
    # passing None raises. Store the full turn (user message + reply) in
    # a single call instead of two half-broken ones.
    memory.save_context(
        {"user_message": user_message},
        {"chat_history": response},
    )

    return response
 
94
  # Example usage with Gradio
95
  theme = "default" # or your custom theme
96