# llmrouter/app.py
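# To run this demo locally (assumes the packages below are installed):
#   pip install streamlit openai numpy pillow
#   streamlit run app.py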
from openai import OpenAI
import streamlit as st
import numpy as np
from PIL import Image
from time import perf_counter
# Page configuration, header section, and images
st.set_page_config(
    page_title="Unify Router Demo",
    page_icon="./assets/unify_spiral.png",
    layout="wide",
    initial_sidebar_state="collapsed",
)
header = st.columns(3)
with header[1]:
    st.image(
        "./assets/unify_logo.png",
        use_column_width="auto",
        caption="Route your prompt to the best LLM",
    )
st.write(
    "Chat with the Unify LLM router! Send your prompt to the best LLM endpoint, "
    "optimizing for the metric of your choice. For any given model, the router "
    "searches across endpoints from different providers to find the one that "
    "performs best on the target metric for each prompt."
)
st.info(
    body="This demo is only a preview of the router's functionality. "
         "Check out our [Chat UI](https://unify.ai/router) for the full experience, "
         "including more endpoints and extra customization!",
    icon="ℹ️",
)
router_avatar = Image.open("./assets/unify_spiral.png")
# Parameter choices
strategies = {
    "🏃 fastest": "tks-per-sec",
    "⌛ most responsive": "ttft",
    "💵 cheapest": "input-cost",
}
models = {
    "🦙 Llama2 70B Chat": "llama-2-70b-chat",
    "🕸️ Mixtral 8x7B Instruct": "mixtral-8x7b-instruct-v0.1",
    "👨‍💻 Deepseek Coder 33B Instruct": "deepseek-coder-33b-instruct",
}
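# The router consumes endpoint strings of the form "<model>@<strategy>",
# e.g. "llama-2-70b-chat@ttft". The dicts above map the display labels to
# the two halves, which are joined with "@" in the create() call below.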
# Body
params_col, chat_col = st.columns([1, 3])
with params_col:
    strategy = st.selectbox(
        label="I want the",
        options=tuple(strategies.keys()),
        help="Choose the metric to optimize the routing for. "
             "Fastest picks the endpoint with the highest output tokens per second. "
             "Most responsive picks the endpoint with the lowest time to first token. "
             "Cheapest picks the endpoint with the lowest input token cost.",
    )
    model = st.selectbox(
        label="endpoint for",
        options=tuple(models.keys()),
        help="Select a model to optimize for. The same model can be offered by "
             "different endpoint providers. The router finds the optimal endpoint "
             "for your chosen model, target metric, and input prompt.",
    )
    with st.expander("Advanced Inputs"):
        max_tokens = st.slider(
            label="Maximum Number Of Tokens",
            min_value=10,
            max_value=500,
            value=100,
            step=20,
            help="The maximum number of tokens that can be generated.",
        )
        temperature = st.slider(
            label="Temperature",
            min_value=0.0,
            max_value=1.0,
            value=0.5,
            step=0.5,
            help="The model's output randomness. Higher values give more random outputs.",
        )
with chat_col:
    st.write("Chat with Router")
    # Initialize an empty chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []
    msgs = st.container(height=300)
    # Write the conversation history so far
    for msg in st.session_state.messages:
        msgs.chat_message(msg["role"]).write(msg["content"])
    # Prepare the client; Unify exposes an OpenAI-compatible API
    client = OpenAI(
        base_url="https://api.unify.ai/v0/",
        api_key=st.secrets["UNIFY_API"],
    )
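    # st.secrets is read from .streamlit/secrets.toml; the entry is assumed
    # to look like (placeholder value):
    #   UNIFY_API = "<your Unify API key>"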
    # Process prompt box input
    if prompt := st.chat_input("Enter your prompt..."):
        # Display the user prompt and save it in the message state
        msgs.chat_message("user").write(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})
        # Send the prompt to the routed model endpoint
        start = perf_counter()
        stream = client.chat.completions.create(
            model="@".join([
                models[model],
                strategies[strategy],
            ]),
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
            max_tokens=max_tokens,
            temperature=temperature,
        )
        # Display output and metrics, then save the output in the message state
        with msgs.chat_message("assistant", avatar=np.array(router_avatar)):
            # Consume the stream fully before stopping the timer: with
            # stream=True, create() returns before generation finishes, so
            # measuring right after the call would miss the generation time.
            chunks = [chunk for chunk in stream]
            time_to_completion = round(perf_counter() - start, 2)
            st.write_stream(chunks)
            # Compute metrics from the usage statistics on the last chunk
            last_chunk = chunks[-1]
            cost = round(last_chunk.usage["cost"], 6)
            output_tokens = last_chunk.usage["completion_tokens"]
            tokens_per_second = round(output_tokens / time_to_completion, 2)
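            # Note: tokens-per-second here is end-to-end; it includes network
            # latency and time-to-first-token, so it reads lower than the
            # provider's raw generation speed.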
            # Display the model, provider, and metrics. The returned model
            # string has the form "<model>@<provider>"; turn the provider
            # slug into a display name.
            provider = " ".join(chunks[0].model.split("@")[-1].split("-")).title()
            st.markdown(f"Model: **{model}**. Provider: **{provider}**")
            st.markdown(
                f"**{tokens_per_second}** Tokens Per Second - "
                f"**{time_to_completion}** Seconds to complete - "
                f"**{cost}** $"
            )
            # Save the output to the message state
            output_chunks = [chunk.choices[0].delta.content or "" for chunk in chunks]
            response = "".join(output_chunks)
            st.session_state.messages.append({"role": "assistant", "content": response})
    # Clear-chat button: wipe the visible container and the saved history
    if st.button("Clear Chat", key="clear"):
        msgs.empty()
        st.session_state.messages = []
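# ---------------------------------------------------------------------------
# Reference sketch (not executed by this app): the equivalent routed call
# without Streamlit, assuming a UNIFY_API environment variable holds the key.
# The endpoint string follows the same "<model>@<strategy>" syntax used above.
#
#   import os
#   from openai import OpenAI
#
#   client = OpenAI(base_url="https://api.unify.ai/v0/",
#                   api_key=os.environ["UNIFY_API"])
#   out = client.chat.completions.create(
#       model="llama-2-70b-chat@ttft",
#       messages=[{"role": "user", "content": "Hello!"}],
#   )
#   print(out.choices[0].message.content)
# ---------------------------------------------------------------------------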