import os
import gradio as gr
from gradio_client import Client
# Access the Hugging Face token from environment variables
hf_token = os.getenv("HF_TOKEN")
# Check if the token is set (optional for debugging)
if hf_token is None:
    print("Hugging Face token not found. Please set the HF_TOKEN environment variable.")
else:
    print("Hugging Face token successfully retrieved.")
# Initialize the Client for the Hugging Face Space with the token
client = Client("segestic/token-cost-calculator", hf_token=hf_token)
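
# Optional sketch (assumption): gradio_client's Client.view_api() can be used to
# inspect the Space's available endpoints and their parameters before calling
# predict(). Uncomment to print the API description at startup.
# client.view_api()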
# Function to interact with the Hugging Face Space API
def interact_with_space(system_prompt, user_prompt, assistant_response, model="gpt-4o"):
    # Call the API with the provided inputs
    result = client.predict(
        system_prompt=system_prompt,
        user_prompt=user_prompt,
        assistant_response=assistant_response,
        model=model,
        api_name="/predict"
    )
    return result
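
# Illustrative sketch only: a direct call to interact_with_space with sample
# prompts. The prompt text, the chosen model, and the commented-out result are
# assumptions for demonstration, not output from the actual Space.
# example_html = interact_with_space(
#     system_prompt="You are a helpful assistant.",
#     user_prompt="Summarize the benefits of unit tests.",
#     assistant_response="Unit tests catch regressions early and document behavior.",
#     model="gpt-4o-mini",
# )
# print(example_html)  # expected: an HTML snippet with the token/cost breakdown (assumed)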
# Define the Gradio interface for the UI
def create_ui():
    # Create the input and output components for the UI
    with gr.Blocks() as demo:
        # Inputs
        system_prompt_input = gr.Textbox(label="System Prompt", placeholder="Enter system prompt here", lines=2)
        user_prompt_input = gr.Textbox(label="User Prompt", placeholder="Enter user prompt here", lines=2)
        assistant_response_input = gr.Textbox(label="Assistant Response", placeholder="Enter assistant response here", lines=2)
        model_input = gr.Dropdown(
            choices=[
                "gpt-4o", "gpt-4o-mini", "gpt-4o-mini-audio-preview", "gpt-4o-mini-realtime-preview",
                "o1", "o1-preview", "o1-mini", "gpt-4o-realtime-preview", "gpt-4o-audio-preview",
                "gpt-4-turbo-preview", "gpt-3.5-turbo", "gpt-4"
            ],
            label="Model",
            value="gpt-4o",  # Default model
            info="Pricing: each model has its own per-token rates for input and output, so the per-call cost varies by model."
        )

        # Output
        output = gr.HTML(label="Response")

        # Button to send inputs to the model and get a response
        submit_btn = gr.Button("Get Response")

        # When the button is clicked, interact with the Space and display the result
        submit_btn.click(
            interact_with_space,
            inputs=[system_prompt_input, user_prompt_input, assistant_response_input, model_input],
            outputs=[output]
        )

    return demo
# Launch the Gradio UI
ui = create_ui()
ui.launch(show_error=True)
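
# Note (assumption): on a Hugging Face Space the default launch() settings are
# enough; for a local or containerized run you may need to bind the host and
# port explicitly, e.g.:
# ui.launch(server_name="0.0.0.0", server_port=7860, show_error=True)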