import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
from openai import OpenAI
import os
from dotenv import load_dotenv
import json

# Load environment variables
load_dotenv()
# Set page config
st.set_page_config(
    page_title="GPT-4o Calculator & Demo",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="collapsed"
)
# Custom CSS for dark theme
st.markdown("""
<style>
/* Dark theme */
.stApp {
    background-color: #0e1117;
    color: #ffffff;
}
/* Tab styling */
.stTabs [data-baseweb="tab-list"] {
    gap: 1rem;
    background-color: #1a1f2b;
    padding: 0.5rem;
    border-radius: 0.5rem;
}
.stTabs [data-baseweb="tab"] {
    height: auto;
    padding: 1rem;
    background-color: #262b37;
    border: 1px solid #2d3748;
    border-radius: 0.5rem;
    color: #e2e8f0;
    font-weight: 500;
}
.stTabs [data-baseweb="tab"]:hover {
    background-color: #2d3748;
}
/* Card styling */
.card {
    padding: 1.5rem;
    background-color: #1a1f2b;
    border: 1px solid #2d3748;
    border-radius: 0.5rem;
    margin: 1rem 0;
}
/* Text styling */
h1, h2, h3 {
    color: #e2e8f0;
}
p {
    color: #a0aec0;
}
/* Input styling */
.stTextInput > div > div {
    background-color: #262b37;
    color: #e2e8f0;
}
/* Metric styling */
[data-testid="stMetricValue"] {
    color: #e2e8f0;
}
</style>
""", unsafe_allow_html=True)
# Check for API key
if 'OPENAI_API_KEY' not in st.session_state:
    api_key = st.text_input('Enter your OpenAI API key:', type='password')
    if api_key:
        st.session_state['OPENAI_API_KEY'] = api_key
        st.success('API key saved!')
    else:
        st.warning('Please enter your OpenAI API key to continue.')
        st.stop()

# Initialize OpenAI client
client = OpenAI(api_key=st.session_state['OPENAI_API_KEY'])
# Pricing data
pricing_data = {
    "gpt-4o-audio-preview": {
        "text_input": 2.50,
        "text_output": 10.00,
        "audio_input": 100.00,
        "audio_output": 200.00,
        "description": "Full-featured model with highest quality"
    },
    "gpt-4o-mini-audio-preview": {
        "text_input": 0.150,
        "text_output": 0.600,
        "audio_input": 10.000,
        "audio_output": 20.000,
        "description": "Cost-effective model for development"
    }
}
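# The rates above are USD per 1M tokens (matching the /1M-token math in
# calculate_cost below and the "/1M tokens" labels in the Documentation tab);
# double-check OpenAI's current pricing page before relying on them.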
def chat_completion_to_dict(response):
    """Convert ChatCompletion object to a dictionary"""
    if isinstance(response, str):
        return {"error": response}
    return {
        "id": response.id,
        "choices": [{
            "index": choice.index,
            "message": {
                "role": choice.message.role,
                "content": choice.message.content
            },
            "finish_reason": choice.finish_reason
        } for choice in response.choices],
        "created": response.created,
        "model": response.model,
        "usage": {
            "prompt_tokens": response.usage.prompt_tokens,
            "completion_tokens": response.usage.completion_tokens,
            "total_tokens": response.usage.total_tokens
        }
    }
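# Note: this helper only serializes the text fields. When audio output is
# requested (as make_api_call below does), recent openai SDK versions attach the
# spoken reply to choice.message.audio, which this dictionary does not capture.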
def make_api_call(text_input, model="gpt-4o-mini-audio-preview"):
    """Make actual API call to OpenAI"""
    try:
        response = client.chat.completions.create(
            model=model,
            messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": text_input
                        }
                    ]
                }
            ],
            modalities=["text", "audio"],
            audio={
                "voice": "verse",
                "format": "pcm16"
            },
            response_format={
                "type": "text"
            },
            temperature=0,
            max_completion_tokens=2048
        )
        return response
    except Exception as e:
        return f"Error: {str(e)}"
def calculate_cost(model, input_type, duration):
    """Calculate cost based on input parameters"""
    pricing = pricing_data[model]
    if input_type == "Audio":
        tokens = duration * 1000  # ~1000 tokens per minute of audio
        input_cost = (tokens * pricing["audio_input"]) / 1000000
        output_cost = (tokens * pricing["audio_output"]) / 1000000
    else:
        words = duration * 150  # ~150 words per minute
        tokens = words * 1.3  # ~1.3 tokens per word
        input_cost = (tokens * pricing["text_input"]) / 1000000
        output_cost = (tokens * pricing["text_output"]) / 1000000
    return {
        "tokens": tokens,
        "input_cost": input_cost,
        "output_cost": output_cost,
        "total": input_cost + output_cost
    }
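# Worked example (gpt-4o-mini-audio-preview, 1 minute of audio):
#   tokens      = 1 * 1000           = 1000
#   input_cost  = 1000 * 10.00 / 1e6 = $0.010
#   output_cost = 1000 * 20.00 / 1e6 = $0.020  -> total = $0.03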
# Main app
st.title("GPT-4o Calculator & Demo 🤖")

# Create tabs
tab1, tab2, tab3 = st.tabs([
    "💰 Cost Calculator",
    "🎯 Live Demo",
    "📚 Documentation"
])
# Tab 1: Cost Calculator
with tab1:
    st.header("Cost Calculator")
    col1, col2 = st.columns([1, 1])

    with col1:
        model = st.selectbox(
            "Select Model",
            options=list(pricing_data.keys()),
            help="Choose the GPT-4o model"
        )
        input_type = st.radio(
            "Input Type",
            options=["Text", "Audio"],
            horizontal=True,
            help="Select the type of content you're processing"
        )
        duration = st.number_input(
            "Duration (minutes)",
            min_value=0.0,
            value=1.0,
            step=0.5,
            help="Enter the duration of your content"
        )
        costs = calculate_cost(model, input_type, duration)

    with col2:
        st.subheader("Cost Breakdown")
        col_a, col_b = st.columns(2)
        with col_a:
            st.metric("Input Cost", f"${costs['input_cost']:.2f}")
        with col_b:
            st.metric("Output Cost", f"${costs['output_cost']:.2f}")
        st.metric("Total Cost", f"${costs['total']:.2f}")

        # Visualize cost distribution
        fig = px.pie(
            values=[costs['input_cost'], costs['output_cost']],
            names=['Input Cost', 'Output Cost'],
            title='Cost Distribution'
        )
        fig.update_layout(
            paper_bgcolor='rgba(0,0,0,0)',
            plot_bgcolor='rgba(0,0,0,0)',
            font_color='#e2e8f0'
        )
        st.plotly_chart(fig, use_container_width=True)
# Tab 2: Live Demo
with tab2:
    st.header("Live API Demo")
    demo_text = st.text_input(
        "Enter your message",
        value="Hello, how are you today?",
        help="Enter the text you want to process"
    )
    demo_model = st.selectbox(
        "Select Model",
        options=list(pricing_data.keys()),
        key="demo_model"
    )

    if st.button("Send Message"):
        with st.spinner("Processing your request..."):
            response = make_api_call(demo_text, demo_model)

        st.subheader("API Response")
        if isinstance(response, str) and response.startswith("Error"):
            st.error(response)
        else:
            # Convert the response to a dictionary before JSON serialization
            response_dict = chat_completion_to_dict(response)
            st.code(json.dumps(response_dict, indent=2), language="json")

            # Calculate cost for this request
            text_costs = calculate_cost(demo_model, "Text", len(demo_text.split()) / 150)
            st.info(f"Cost for this request: ${text_costs['total']:.4f}")
# Tab 3: Documentation
with tab3:
    st.header("Documentation")

    st.subheader("Model Capabilities")
    st.markdown("""
GPT-4o supports:
- Text-to-text conversion
- Text-to-audio conversion
- Audio-to-text conversion
- Audio-to-audio conversion
""")

    st.subheader("Token Usage")
    token_data = pd.DataFrame([
        {"Content Type": "Text", "Token Rate": "~1.3 tokens per word"},
        {"Content Type": "Audio", "Token Rate": "~1000 tokens per minute"}
    ])
    st.table(token_data)

    st.subheader("Pricing Details")
    for model, prices in pricing_data.items():
        with st.expander(model):
            st.markdown(f"""
**Text Processing**
- Input: ${prices['text_input']}/1M tokens
- Output: ${prices['text_output']}/1M tokens

**Audio Processing**
- Input: ${prices['audio_input']}/1M tokens
- Output: ${prices['audio_output']}/1M tokens
""")
# Footer
st.markdown("---")
st.caption("Note: All calculations are estimates. Actual costs may vary based on specific usage patterns.")