# NOTE(review): the lines below were Hugging Face file-viewer residue captured
# by the extraction ("DrishtiSharma's picture / Update app.py / ba331c4
# verified / raw / history blame / 9.1 kB") — not source code. Converted to a
# comment so the file parses.
# ref: https://github.com/kram254/Mixture-of-Agents-running-on-Groq/tree/main
import streamlit as st
import json
import asyncio
from typing import Union, Iterable, AsyncIterable
from moa.agent import MOAgent
from moa.agent.moa import ResponseChunk
from streamlit_ace import st_ace
import copy
# Default MOA configuration: a llama3-70b main (aggregator) model running
# three cycles over the default layer agents below.
default_config = dict(
    main_model="llama3-70b-8192",
    cycles=3,
    layer_agent_config={},
)

# Default layer agents. Each system prompt carries the {helper_response}
# placeholder that the MOA framework fills with the previous layer's output.
layer_agent_config_def = {
    "layer_agent_1": dict(
        system_prompt="Think through your response step by step. {helper_response}",
        model_name="llama3-8b-8192",
    ),
    "layer_agent_2": dict(
        system_prompt="Respond with a thought and then your response to the question. {helper_response}",
        model_name="gemma-7b-it",
        temperature=0.7,
    ),
    "layer_agent_3": dict(
        system_prompt="You are an expert at logic and reasoning. Always take a logical approach to the answer. {helper_response}",
        model_name="llama3-8b-8192",
    ),
}
# Recommended MOA configuration: two cycles over four layer agents with a
# temperature ladder (0.1 -> 0.5) plus a dedicated planner agent.
rec_config = dict(
    main_model="llama3-70b-8192",
    cycles=2,
    layer_agent_config={},
)

# Recommended layer agents; {helper_response} is the MOA aggregation slot.
layer_agent_config_rec = {
    "layer_agent_1": dict(
        system_prompt="Think through your response step by step. {helper_response}",
        model_name="llama3-8b-8192",
        temperature=0.1,
    ),
    "layer_agent_2": dict(
        system_prompt="Respond with a thought and then your response to the question. {helper_response}",
        model_name="llama3-8b-8192",
        temperature=0.2,
    ),
    "layer_agent_3": dict(
        system_prompt="You are an expert at logic and reasoning. Always take a logical approach to the answer. {helper_response}",
        model_name="llama3-8b-8192",
        temperature=0.4,
    ),
    "layer_agent_4": dict(
        system_prompt="You are an expert planner agent. Create a plan for how to answer the human's query. {helper_response}",
        model_name="mixtral-8x7b-32768",
        temperature=0.5,
    ),
}
# Unified streaming function to handle async and sync responses
async def stream_or_async_response(
    messages: "Union[Iterable[ResponseChunk], AsyncIterable[ResponseChunk]]",
):
    """Async generator relaying final-response deltas from a MOA chat stream.

    'intermediate' chunks are buffered per layer (keyed by
    message['metadata']['layer']); when a 'final' chunk arrives, the buffered
    layer outputs are rendered as Streamlit expander columns, the buffer is
    cleared, and the final chunk's delta is yielded to the caller.

    BUG FIX: the original placed ``yield`` inside an inner ``async def``,
    which made the inner function the async generator — ``await``-ing it
    raised TypeError and the outer function was not iterable with
    ``async for``. The outer function now yields directly.

    Args:
        messages: sync or async iterable of ResponseChunk dicts with keys
            'response_type', 'delta' and (for intermediates) 'metadata'.
            (Annotation is quoted so the module stays importable without
            evaluating the project-local ResponseChunk name at def time.)

    Yields:
        str: each final chunk's delta.
    """
    layer_outputs = {}

    def _buffer_intermediate(message):
        # Group intermediate deltas by the layer that produced them.
        layer_outputs.setdefault(message['metadata']['layer'], []).append(message['delta'])

    def _render_buffered_layers():
        # Show each buffered layer's agent outputs side by side, then reset
        # the buffer so the next cycle starts clean.
        for layer, outputs in layer_outputs.items():
            st.write(f"Layer {layer}")
            cols = st.columns(len(outputs))
            for i, output in enumerate(outputs):
                with cols[i]:
                    st.expander(label=f"Agent {i+1}", expanded=False).write(output)
        layer_outputs.clear()

    def _handle(message):
        # Returns the delta to yield for a final chunk, else None.
        if message['response_type'] == 'intermediate':
            _buffer_intermediate(message)
            return None
        _render_buffered_layers()
        return message['delta']

    if isinstance(messages, AsyncIterable):
        # Process asynchronous messages
        async for message in messages:
            delta = _handle(message)
            if delta is not None:
                yield delta
    else:
        # Process synchronous messages
        for message in messages:
            delta = _handle(message)
            if delta is not None:
                yield delta
# Set up the MOAgent
def set_moa_agent(
    main_model: str = default_config['main_model'],
    cycles: int = default_config['cycles'],
    layer_agent_config: "dict[str, dict[str, Any]] | None" = None,
    main_model_temperature: float = 0.1,
    override: bool = False
):
    """Initialise (or re-initialise) the MOAgent held in Streamlit session state.

    Args:
        main_model: Groq model id for the aggregating main agent.
        cycles: number of layer cycles run before the main agent answers.
        layer_agent_config: per-agent configuration mapping; ``None`` (the
            default) means "use a fresh deep copy of layer_agent_config_def".
            BUG FIX: the original used ``copy.deepcopy(...)`` as the default
            value itself, which is evaluated once at definition time — a
            shared mutable default. Its annotation ``dict[dict[str, any]]``
            (builtin ``any``) was also malformed.
        main_model_temperature: sampling temperature for the main model.
        override: when True overwrite existing session-state values;
            otherwise only fill in keys that are missing.
    """
    if layer_agent_config is None:
        # Fresh copy per call so the module-level template is never mutated.
        layer_agent_config = copy.deepcopy(layer_agent_config_def)

    if override or ("main_model" not in st.session_state):
        st.session_state.main_model = main_model
    if override or ("cycles" not in st.session_state):
        st.session_state.cycles = cycles
    if override or ("layer_agent_config" not in st.session_state):
        st.session_state.layer_agent_config = layer_agent_config
    if override or ("main_temp" not in st.session_state):
        st.session_state.main_temp = main_model_temperature

    if override or ("moa_agent" not in st.session_state):
        # Hand MOAgent its own copy so later edits in the config editor
        # cannot leak into the already-constructed agent.
        st.session_state.moa_agent = MOAgent.from_config(
            main_model=st.session_state.main_model,
            cycles=st.session_state.cycles,
            layer_agent_config=copy.deepcopy(st.session_state.layer_agent_config),
            temperature=st.session_state.main_temp
        )
# Streamlit app layout
st.set_page_config(
    page_title="Mixture of Agents",
    menu_items={
        'About': "## Groq Mixture-Of-Agents \n Powered by [Groq](https://groq.com)"
    },
    layout="wide"
)

# Groq model ids the user can select for the main (aggregator) model.
valid_model_names = [
    'llama3-70b-8192',
    'llama3-8b-8192',
    'gemma-7b-it',
    'gemma2-9b-it',
    'mixtral-8x7b-32768'
]

# Banner image linking back to Groq, followed by a divider.
st.markdown("<a href='https://groq.com'><img src='app/static/banner.png' width='500'></a>", unsafe_allow_html=True)
st.write("---")

# Initialize session state: empty chat history on first load.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Seed session state with the default MOA configuration (no override, so an
# existing configuration from a previous rerun is preserved).
set_moa_agent()
# Sidebar for configuration: a single form with two submit buttons — one
# applies the recommended config, the other applies the user-edited fields.
with st.sidebar:
    st.title("MOA Configuration")
    with st.form("Agent Configuration", border=False):
        # One-click switch to the recommended config; clears chat history.
        if st.form_submit_button("Use Recommended Config"):
            try:
                set_moa_agent(
                    main_model=rec_config['main_model'],
                    cycles=rec_config['cycles'],
                    layer_agent_config=layer_agent_config_rec,
                    override=True
                )
                st.session_state.messages = []
                st.success("Configuration updated successfully!")
            except Exception as e:
                st.error(f"Error updating configuration: {str(e)}")

        # Main model selection (pre-selects the current session value).
        new_main_model = st.selectbox(
            "Select Main Model",
            options=valid_model_names,
            index=valid_model_names.index(st.session_state.main_model)
        )

        # Cycles input — labelled "Layers" in the UI.
        new_cycles = st.number_input(
            "Number of Layers",
            min_value=1,
            max_value=10,
            value=st.session_state.cycles
        )

        # Main Model Temperature
        # NOTE(review): value is hard-coded to 0.1 rather than seeded from
        # st.session_state.main_temp — the widget forgets a custom
        # temperature on rerun; confirm whether that is intended.
        main_temperature = st.number_input(
            label="Main Model Temperature",
            value=0.1,
            min_value=0.0,
            max_value=1.0,
            step=0.1
        )

        # Layer agent configuration, editable as JSON in an Ace editor.
        new_layer_agent_config = st_ace(
            value=json.dumps(st.session_state.layer_agent_config, indent=2),
            language='json',
            placeholder="Layer Agent Configuration (JSON)",
            show_gutter=False,
            wrap=True,
            auto_update=True
        )

        # Apply the edited fields; invalid JSON surfaces as an error banner.
        if st.form_submit_button("Update Configuration"):
            try:
                new_layer_config = json.loads(new_layer_agent_config)
                set_moa_agent(
                    main_model=new_main_model,
                    cycles=new_cycles,
                    layer_agent_config=new_layer_config,
                    main_model_temperature=main_temperature,
                    override=True
                )
                st.session_state.messages = []
                st.success("Configuration updated successfully!")
            except Exception as e:
                st.error(f"Error updating configuration: {str(e)}")
# Main app layout
st.header("Mixture of Agents")
st.write("This project oversees implementation of Mixture of Agents architecture powered by Groq LLMs.")

# Display current configuration in a collapsed expander, with the layer
# agent config shown read-only in an Ace editor.
with st.expander("Current MOA Configuration", expanded=False):
    st.markdown(f"**Main Model**: `{st.session_state.main_model}`")
    st.markdown(f"**Main Model Temperature**: `{st.session_state.main_temp:.1f}`")
    st.markdown(f"**Layers**: `{st.session_state.cycles}`")
    st.markdown("**Layer Agents Config:**")
    st_ace(
        value=json.dumps(st.session_state.layer_agent_config, indent=2),
        language='json',
        placeholder="Layer Agent Configuration (JSON)",
        show_gutter=False,
        wrap=True,
        readonly=True,
        auto_update=True
    )
# Chat interface: replay the stored conversation on every Streamlit rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if query := st.chat_input("Ask a question"):
    async def handle_query():
        """Run one chat turn: echo the user query, stream the MOA answer.

        BUG FIX: the original rendered and stored only the current chunk
        (`response`), so the placeholder was overwritten on every delta and
        the saved assistant message contained just the LAST delta. Deltas
        are now accumulated into the full response.
        """
        st.session_state.messages.append({"role": "user", "content": query})
        with st.chat_message("user"):
            st.write(query)

        moa_agent: MOAgent = st.session_state.moa_agent
        full_response = ""
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            messages = moa_agent.chat(query, output_format='json')
            async for delta in stream_or_async_response(messages):
                # Grow the answer and re-render it so the user sees a
                # progressively streaming response.
                full_response += delta
                message_placeholder.markdown(full_response)
        st.session_state.messages.append({"role": "assistant", "content": full_response})

    # NOTE(review): asyncio.run starts a fresh event loop per rerun; fine for
    # Streamlit's script model, but would fail inside an already-running loop.
    asyncio.run(handle_query())
# Add acknowledgment at the bottom
st.markdown("---")
st.markdown("""
###
This app is based on [Emmanuel M. Ndaliro's work](https://github.com/kram254/Mixture-of-Agents-running-on-Groq/tree/main).
""")