File size: 2,533 Bytes
77ee9c6
cf44614
cddf55e
cf44614
77ee9c6
 
 
 
 
 
 
 
 
 
713aecc
cf44614
cddf55e
 
 
 
 
 
 
 
ce88bc0
cddf55e
 
cf44614
cddf55e
 
 
 
 
 
 
 
ce88bc0
 
 
 
 
 
 
 
 
 
cddf55e
 
1069dbd
cddf55e
 
 
 
 
 
 
 
 
 
 
 
 
13965e9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import os
import gradio as gr
from gradio_client import Client

# Pull the Hugging Face access token out of the environment; it may be
# absent, in which case the Space call below runs unauthenticated.
hf_token = os.getenv("HF_TOKEN")

# Emit a one-line diagnostic so misconfigured deployments are easy to spot.
token_status = (
    "Hugging Face token not found. Please set the HF_TOKEN environment variable."
    if hf_token is None
    else "Hugging Face token successfully retrieved."
)
print(token_status)

# Open a client connection to the target Space, authenticating with the token.
client = Client("segestic/token-cost-calculator", hf_token=hf_token)

# Function to interact with the Hugging Face Space API
def interact_with_space(system_prompt, user_prompt, assistant_response, model="gpt-4o"):
    """Forward one prompt/response triple to the Space and return its prediction.

    Args:
        system_prompt: System-role text sent to the cost calculator.
        user_prompt: User-role text sent to the cost calculator.
        assistant_response: Assistant-role text sent to the cost calculator.
        model: Model identifier used for pricing; defaults to "gpt-4o".

    Returns:
        Whatever the remote ``/predict`` endpoint produces (rendered as HTML
        by the UI).
    """
    # Delegate directly to the module-level gradio_client connection.
    return client.predict(
        system_prompt=system_prompt,
        user_prompt=user_prompt,
        assistant_response=assistant_response,
        model=model,
        api_name="/predict",
    )

# Define the Gradio interface for the UI
def create_ui():
    """Assemble and return the Gradio Blocks interface for the calculator.

    Returns:
        A ``gr.Blocks`` app with three text inputs, a model dropdown, an
        HTML output, and a submit button wired to ``interact_with_space``.
    """
    # Pricing varies per model, so the dropdown enumerates every supported one.
    model_choices = [
        "gpt-4o", "gpt-4o-mini", "gpt-4o-mini-audio-preview", "gpt-4o-mini-realtime-preview",
        "o1", "o1-preview", "o1-mini", "gpt-4o-realtime-preview", "gpt-4o-audio-preview",
        "gpt-4-turbo-preview", "gpt-3.5-turbo", "gpt-4"
    ]

    with gr.Blocks() as demo:
        # Free-text inputs for each role of the conversation being priced.
        system_prompt_input = gr.Textbox(label="System Prompt", placeholder="Enter system prompt here", lines=2)
        user_prompt_input = gr.Textbox(label="User Prompt", placeholder="Enter user prompt here", lines=2)
        assistant_response_input = gr.Textbox(label="Assistant Response", placeholder="Enter assistant response here", lines=2)
        model_input = gr.Dropdown(
            choices=model_choices,
            label="Model",
            value="gpt-4o",  # Default value is "gpt-4o"
            info="Pricing: Different models have different costs for input and output tokens, per call price.",
        )

        # The Space replies with markup, so render the result as HTML.
        output = gr.HTML(label="Response")

        submit_btn = gr.Button("Get Response")

        # Clicking the button forwards all four inputs to the remote Space.
        submit_btn.click(
            interact_with_space,
            inputs=[system_prompt_input, user_prompt_input, assistant_response_input, model_input],
            outputs=[output],
        )

    return demo

# Launch the Gradio UI
# Built and launched at import time (no __main__ guard) — this is the
# standard entry-point shape for a Hugging Face Space app script.
ui = create_ui()
# show_error=True surfaces server-side exceptions in the browser UI,
# which helps when the remote Space call fails.
ui.launch(show_error=True)