# synalinks-notebooks / code_examples / 1_basics / 4_conversational_applications.py
# Author: YoanSallami
# Commit 45d9b08 — "Avoid setting the env variables"
# Marimo notebook scaffolding: `__generated_with` records the marimo version
# that generated this file (read back by marimo when the notebook is opened),
# and `app` is the notebook entry point that the cells below register with.
import marimo

__generated_with = "0.11.9"
app = marimo.App()
@app.cell(hide_code=True)
def _():
    """Import the notebook-wide dependencies and reset the synalinks backend."""
    import marimo as mo
    import synalinks

    # Clear any cached backend state left over from a previous notebook run.
    synalinks.backend.clear_session()
    return mo, synalinks
@app.cell(hide_code=True)
def _(mo):
    # Markdown-only cell: introduces the conversational-application pattern.
    mo.md(
        r"""
# Conversational Applications
Synalinks is designed to handle conversational applications as well as
query-based systems. In the case of a conversational applications, the
input data model is a list of chat messages, and the output an individual
chat message. The `Program` is in that case responsible of handling a
**single conversation turn**.
"""
    )
    return
@app.cell(hide_code=True)
def _(mo):
    # Markdown-only cell: explains the default ChatMessage output and streaming.
    mo.md(
        r"""
Now we can program our application like you would do with any `Program`. For this example,
we are going to make a very simple chatbot.
By default, if no data_model/schema is provided to the `Generator` it will output a `ChatMessage` like output.
If the data model is `None`, then you can enable streaming.
**Note:** Streaming is disabled during training and should only be used in the **last** `Generator` of your pipeline.
"""
    )
    return
@app.cell
async def _(synalinks):
    """Build and visualize a single-turn chatbot `Program`."""
    from synalinks.backend import ChatMessage
    from synalinks.backend import ChatRole
    from synalinks.backend import ChatMessages

    language_model = synalinks.LanguageModel(
        model="openai/gpt-4o-mini",
    )

    # The program maps a full chat history to the next assistant message.
    _chat_history = synalinks.Input(data_model=ChatMessages)
    _next_message = await synalinks.Generator(
        language_model=language_model,
        prompt_template=synalinks.chat_prompt_template(),
        # Streaming stays off: the marimo chat widget used later in this
        # notebook does not support streamed responses yet.
        streaming=False,
    )(_chat_history)

    program = synalinks.Program(
        inputs=_chat_history,
        outputs=_next_message,
    )

    # Render the program graph so its structure is visible in the notebook.
    synalinks.utils.plot_program(
        program,
        show_module_names=True,
        show_trainable=True,
        show_schemas=True,
    )
    return ChatMessage, ChatMessages, ChatRole, language_model, program
@app.cell(hide_code=True)
def _(mo):
    # Markdown-only cell: section header for the interactive chat demo below.
    mo.md(
        r"""
## Running the chatbot inside the notebook
In this example, we will show you how to run the conversational application inside this reactive notebook.
"""
    )
    return
@app.cell(hide_code=True)
def _(mo):
    """Render a form asking for the user's OpenAI API key.

    Bug fix: this cell previously ended with a bare ``return``, so
    ``openai_api_key`` was never handed to the downstream cells that declare
    it as a parameter; it is now returned explicitly.
    """
    openai_api_key = mo.ui.text_area(placeholder="Your OpenAI API key...").form()
    # Display the form as this cell's output.
    openai_api_key
    return (openai_api_key,)
@app.cell(hide_code=True)
def _(mo, openai_api_key):
    # Configure litellm with the key entered in the API-key form.
    import litellm

    # Halt this cell (and its dependents) until a key has been submitted.
    mo.stop(not openai_api_key.value)
    litellm.openai_key = openai_api_key.value
    return
@app.cell(hide_code=True)
def _(ChatMessage, ChatMessages, ChatRole, mo, openai_api_key, program):
    """Run the chatbot inside a marimo chat widget.

    Fixes:
    - ``openai_api_key`` was referenced in the body but missing from the
      cell's parameter list, which raised a ``NameError`` when the cell ran;
      it is now declared as a dependency.
    - ``str.find(...) > 0`` silently skipped the HTML wrapper when it started
      at index 0; the checks now test ``!= -1``.
    """
    mo.stop(not openai_api_key.value, mo.md("Provide your OpenAI API key"))

    def cleanup_assistant_message(msg):
        # marimo renders assistant messages wrapped in a <span>; strip that
        # wrapper so only the raw text is fed back into the program.
        start_tok = '<span class="paragraph">'
        end_tok = "</span>"
        start = msg.content.find(start_tok)
        if start != -1:
            msg.content = msg.content[start + len(start_tok):]
        end = msg.content.find(end_tok, 1)
        if end != -1:
            msg.content = msg.content[:end]
        return msg

    async def synalinks_program(messages, config):
        # Rebuild the full chat history from marimo's message list; the
        # program then produces the next assistant turn from that history.
        chat_history = ChatMessages()
        for msg in messages:
            if msg.role == "user":
                chat_history.messages.append(
                    ChatMessage(
                        role=ChatRole.USER,
                        content=msg.content,
                    )
                )
            else:
                msg = cleanup_assistant_message(msg)
                chat_history.messages.append(
                    ChatMessage(
                        role=ChatRole.ASSISTANT,
                        content=msg.content,
                    )
                )
        result = await program(chat_history)
        return result.get("content")

    chat = mo.ui.chat(synalinks_program)
    # Display the chat widget as this cell's output.
    chat
    return chat, cleanup_assistant_message, synalinks_program
@app.cell(hide_code=True)
def _(mo):
    # Markdown-only conclusion cell. Fix: this cell was declared ``async``
    # although it contains no ``await``; it is now a plain cell, consistent
    # with the other markdown cells in this notebook.
    mo.md(
        r"""
## Conclusion
In this notebook, we explored how Synalinks handle conversational applications.
You have now a solid understanding to create chatbots and conversational agents.
### Key Takeaways
- **Conversational Flow Management**: Synalinks effectively manages conversational
applications by handling inputs as a list of chat messages and generating
individual chat messages as outputs. This structure allows for efficient
processing of conversation turns.
- **Streaming and Real-Time Interaction**: Synalinks supports streaming for
real-time interactions, enhancing user engagement. However, streaming is
disabled during training and should be used only in the final `Generator`.
- **Customizable Prompt Templates**: The prompt templates can be tailored to fit
conversational contexts, guiding the language model to produce coherent and
relevant responses.
"""
    )
    return
if __name__ == "__main__":
    # Allow running the notebook as a plain script: `python 4_conversational_applications.py`.
    app.run()