# app.py
import gradio as gr
from my_memory_logic import run_with_session_memory
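# run_with_session_memory is assumed here to take (message, session_id) and
# return the assistant's reply as a string, loading and updating the
# conversation memory stored under that session ID (module not shown in this file).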
def chat_interface_fn(message, history, session_id):
    """
    Multi-turn chat function for Gradio's ChatInterface.

    'session_id' selects the stored conversation memory, so the same ID keeps
    context across turns; ChatInterface passes it in via additional_inputs.
    ChatInterface stores and renders the chat history itself (as a list of
    {"role": ..., "content": ...} dicts when type="messages"), so this
    function only returns the new assistant reply; returning the full
    history here would repeat earlier turns in the display.
    """
    # Get the answer from the session-based memory pipeline for this session
    answer = run_with_session_memory(message, session_id)
    return answer
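# Example (calling the function directly, assuming the memory pipeline is set up):
#   chat_interface_fn("Any tips for better sleep?", [], "abc123")
# returns the reply string for session "abc123".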
my_chat_css = """
.gradio-container {
    margin: auto;
}
.user .wrap {
    text-align: right !important;
}
.assistant .wrap {
    text-align: left !important;
}
"""
with gr.Blocks(css=my_chat_css) as demo:
    gr.Markdown("### DailyWellnessAI (User on right, Assistant on left)")
    session_id_box = gr.Textbox(label="Session ID", value="abc123", interactive=True)
    chat_interface = gr.ChatInterface(
        fn=chat_interface_fn,
        # Pass the textbox itself so each request reads the current Session ID;
        # reading session_id_box.value in a lambda would only see the initial value.
        additional_inputs=[session_id_box],
        type="messages",  # history is exchanged as {"role", "content"} dicts
        title="DailyWellnessAI (Session-based Memory)",
        description="Ask your questions. The session_id determines your stored memory.",
    )
demo.launch()
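# To run locally (assuming gradio is installed and my_memory_logic is importable):
#   python app.py
# Gradio serves the app on http://127.0.0.1:7860 by default.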