import os
import time
from datetime import datetime

import openai
from openai import OpenAI
import gradio as gr
# --- Constants ---
DEFAULT_MODEL = "gpt-4o-mini-2024-07-18"
DEFAULT_TEMPERATURE = 1.0
DEFAULT_TOP_P = 1.0
DEFAULT_FREQ_PENALTY = 0
DEFAULT_PRES_PENALTY = 0
MAX_TOKENS = 2048           # maximum tokens per completion
MAX_HISTORY_LENGTH = 5      # default number of chat turns shown in the UI
# --- API Key and Client Initialization ---
API_KEY = os.getenv("OPENAI_API_KEY")  # the key is read from the OPENAI_API_KEY environment variable
client = OpenAI(api_key=API_KEY)
# --- Helper Functions ---
def get_openai_response(prompt, model=DEFAULT_MODEL, temperature=DEFAULT_TEMPERATURE, top_p=DEFAULT_TOP_P,
                        frequency_penalty=DEFAULT_FREQ_PENALTY, presence_penalty=DEFAULT_PRES_PENALTY,
                        max_tokens=MAX_TOKENS, system_prompt="", chat_history=None):
    """Streams a response from the OpenAI Chat Completions API, yielding the text accumulated so far."""
    today_day = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    try:
        # Prepend the current date to the system prompt, then replay the prior chat turns.
        messages = [{"role": "system", "content": f"Today's date is: {today_day} " + system_prompt}]
        if chat_history:
            for user_msg, assistant_msg in chat_history:
                messages.append({"role": "user", "content": user_msg})
                messages.append({"role": "assistant", "content": assistant_msg})
        messages.append({"role": "user", "content": prompt})
        response = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=top_p,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
            response_format={"type": "text"},
            stream=True,  # stream tokens as they are generated
        )
        # Accumulate the streamed chunks and yield the text collected so far.
        collected_messages = []
        for chunk in response:
            chunk_message = chunk.choices[0].delta.content
            if chunk_message is not None:
                collected_messages.append(chunk_message)
                full_reply_content = ''.join(collected_messages)
                yield full_reply_content
    # Inside a generator, `return <value>` never reaches the caller, so error
    # messages are yielded as the streamed text instead.
    except openai.APIConnectionError as e:
        yield f"Error: Could not connect to OpenAI API: {e}"
    except openai.RateLimitError as e:
        yield f"Error: Rate limit exceeded: {e}"
    except openai.APIStatusError as e:
        yield f"Error: OpenAI API returned an error: {e}"
    except Exception as e:
        yield f"An unexpected error occurred: {e}"
def update_ui(message, chat_history, model, temperature, top_p, frequency_penalty, presence_penalty, system_prompt, history_length):
    """Streams the assistant reply into the Gradio chatbot, showing only the most recent turns."""
    bot_message_gen = get_openai_response(
        prompt=message, model=model, temperature=temperature, top_p=top_p,
        frequency_penalty=frequency_penalty, presence_penalty=presence_penalty,
        system_prompt=system_prompt, chat_history=chat_history
    )
    chat_history.append((message, ""))
    for bot_message in bot_message_gen:
        # Replace the placeholder assistant turn with the text streamed so far.
        chat_history[-1] = (chat_history[-1][0], bot_message)
        visible_history = chat_history[-int(history_length):]  # slider values may arrive as floats
        time.sleep(0.025)  # brief pause so streamed updates render smoothly
        yield "", visible_history
# --- Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Chat with GPT-4.5 (gpt-4.5-preview-2025-02-27)")
    gr.Markdown("⚠️ **Important:** GPT-4.5 is no longer available on this Space; it was free for about 4 hours on 02/27/2025. | Made by [@diegocabezas01](https://x.com/diegocabezas01) on X")
    gr.Markdown("☕ [Buy me a Coffee](https://buymeacoffee.com/diegocp01m)")
    gr.Markdown("---")
    gr.Markdown("""
    🚀 **GPT-4.5 EXPERIMENT:** GPT-4.5 was released today at 3 PM ET, but it's only available to Pro users and developers.
    I created a Hugging Face Space using the API so everyone could chat with GPT-4.5 for free, until my credits ran out! 😄

    **Here's how the experiment went:**

    📊 **Chat Completions metrics (Feb 27, 2025):**
    - 111 requests
    - 64,764 total tokens processed
    - Total spend: $10.99

    This Space was live from 4:23 PM ET to 8:53 PM ET on Feb 27, 2025. [Read More](https://x.com/diegocabezas01/status/1895291365376041045)

    Results from the OpenAI platform: 👇
    """)
    gr.Image("https://pbs.twimg.com/media/Gk1tVnRXkAASa2U?format=jpg&name=4096x4096", elem_id="gpt4_5_image")
    gr.Markdown("Chat for free with GPT-4o mini here: 👇")
    with gr.Row():
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(
                show_label=False,
                avatar_images=(
                    "https://cdn-icons-png.flaticon.com/512/8428/8428718.png",  # user avatar
                    "https://upload.wikimedia.org/wikipedia/commons/thumb/e/ef/ChatGPT-Logo.svg/640px-ChatGPT-Logo.svg.png"  # assistant avatar
                ),
                render_markdown=True,
                height=500
            )
            msg = gr.Textbox(placeholder="Type your message here...", scale=4, show_label=False)
    with gr.Accordion("Advanced Options", open=False):
        model_select = gr.Dropdown(
            label="Model",
            choices=["gpt-3.5-turbo-0125", "gpt-4o-mini-2024-07-18"],
            value=DEFAULT_MODEL,
            interactive=True
        )
        temperature_slider = gr.Slider(label="Temperature", minimum=0.0, maximum=2.0, value=DEFAULT_TEMPERATURE, step=0.1, interactive=True)
        top_p_slider = gr.Slider(label="Top P", minimum=0.0, maximum=1.0, value=DEFAULT_TOP_P, step=0.05, interactive=True)
        frequency_penalty_slider = gr.Slider(label="Frequency Penalty", minimum=-2.0, maximum=2.0, value=DEFAULT_FREQ_PENALTY, step=0.1, interactive=True)
        presence_penalty_slider = gr.Slider(label="Presence Penalty", minimum=-2.0, maximum=2.0, value=DEFAULT_PRES_PENALTY, step=0.1, interactive=True)
        system_prompt_textbox = gr.Textbox(label="System Prompt", placeholder="Enter a custom system prompt...", lines=3, interactive=True)
        history_length_slider = gr.Slider(label="Chat History Length", minimum=1, maximum=20, value=MAX_HISTORY_LENGTH, step=1, interactive=True)
    with gr.Row():
        send = gr.Button("Send")
        clear = gr.Button("Clear")

    # --- Event Handlers ---
    send_event = send.click(
        update_ui,
        [msg, chatbot, model_select, temperature_slider, top_p_slider, frequency_penalty_slider, presence_penalty_slider, system_prompt_textbox, history_length_slider],
        [msg, chatbot]
    )
    msg.submit(
        update_ui,
        [msg, chatbot, model_select, temperature_slider, top_p_slider, frequency_penalty_slider, presence_penalty_slider, system_prompt_textbox, history_length_slider],
        [msg, chatbot]
    )
    clear.click(lambda: None, None, chatbot, queue=False)

    gr.Examples(
        examples=["Tell me about quantum computing", "Write a short poem about AI", "How can I improve my Python skills?"],
        inputs=msg
    )
    msg.focus()  # registers an (empty) focus event listener; it does not move keyboard focus to the textbox
# --- Launch ---
if __name__ == "__main__":
    demo.launch()