import base64
import io
import mimetypes

import fitz  # PyMuPDF, used for PDF text extraction
import gradio as gr
from openai import OpenAI

# Requires: gradio, the openai v1.x SDK, and PyMuPDF
#     pip install gradio openai pymupdf

# Set at runtime via the "Set API Key" button.
api_key = ""

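# A common alternative to the textbox flow (a sketch, not part of the original
# app): read the key from the OPENAI_API_KEY environment variable instead, e.g.
#     import os
#     api_key = os.environ.get("OPENAI_API_KEY", "")
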
def set_api_key(key):
    global api_key
    api_key = key
    return "API Key Set Successfully!"

def query_openai(messages, temperature, top_p, max_output_tokens):
    """Send a chat completion request and return the model's reply text."""
    if not api_key:
        return "Please enter your OpenAI API key first."

    try:
        client = OpenAI(api_key=api_key)

        # Fall back to sensible defaults when a control comes through empty.
        temperature = float(temperature) if temperature else 1.0
        top_p = float(top_p) if top_p else 1.0
        max_output_tokens = int(max_output_tokens) if max_output_tokens else 2048

        response = client.chat.completions.create(
            model="gpt-4.1",
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_output_tokens,
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"

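# Illustrative only: the payload shape query_openai expects, with hypothetical
# values.
#     messages = [{"role": "user", "content": [{"type": "text", "text": "Hi"}]}]
#     query_openai(messages, temperature=0.7, top_p=1.0, max_output_tokens=512)
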
def image_url_chat(image_url, text_query, temperature, top_p, max_output_tokens):
    if not image_url or not text_query:
        return "Please provide an image URL and a query."

    messages = [
        {"role": "user", "content": [
            {"type": "image_url", "image_url": {"url": image_url}},
            {"type": "text", "text": text_query},
        ]},
    ]
    return query_openai(messages, temperature, top_p, max_output_tokens)

def text_chat(text_query, temperature, top_p, max_output_tokens):
    if not text_query:
        return "Please enter a query."

    messages = [{"role": "user", "content": [{"type": "text", "text": text_query}]}]
    return query_openai(messages, temperature, top_p, max_output_tokens)

def image_chat(image_file, text_query, temperature, top_p, max_output_tokens):
    if image_file is None or not text_query:
        return "Please upload an image and provide a query."

    # gr.File(type="filepath") hands us a path string; read and base64-encode it.
    with open(image_file, "rb") as img:
        base64_image = base64.b64encode(img.read()).decode("utf-8")

    # Guess the MIME type from the extension rather than assuming JPEG.
    mime_type = mimetypes.guess_type(image_file)[0] or "image/jpeg"
    image_data = f"data:{mime_type};base64,{base64_image}"

    messages = [
        {"role": "user", "content": [
            {"type": "image_url", "image_url": {"url": image_data}},
            {"type": "text", "text": text_query},
        ]},
    ]
    return query_openai(messages, temperature, top_p, max_output_tokens)

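# A base64 data URL lets the API read a local image without a public URL; the
# string produced above looks like "data:image/png;base64,iVBORw0KG..."
# (example prefix only).
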
def pdf_chat(pdf_file, text_query, temperature, top_p, max_output_tokens):
    if pdf_file is None or not text_query:
        return "Please upload a PDF and provide a query."

    try:
        # gr.File(type="filepath") yields a path string, so open it directly
        # (calling pdf_file.name here would fail on a string).
        doc = fitz.open(pdf_file)
        text = "\n".join(page.get_text("text") for page in doc)

        if not text.strip():
            return "No text found in the PDF."

        # Send the extracted text followed by the user's question.
        messages = [
            {"role": "user", "content": [
                {"type": "text", "text": text},
                {"type": "text", "text": text_query},
            ]},
        ]
        return query_openai(messages, temperature, top_p, max_output_tokens)
    except Exception as e:
        return f"Error processing the PDF: {str(e)}"

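# NOTE: pdf_chat inlines the entire extracted text into the prompt, so a very
# large PDF can exceed the model's context window. A simple guard (sketch with
# a hypothetical limit) would be:
#     MAX_CHARS = 100_000
#     text = text[:MAX_CHARS]
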
def transcribe_audio(audio_binary, openai_api_key):
    if not openai_api_key:
        return "Error: No API key provided."

    client = OpenAI(api_key=openai_api_key)

    try:
        # Wrap the raw bytes in a file-like object; the .name attribute is how
        # the SDK infers the audio container format.
        audio_file_obj = io.BytesIO(audio_binary)
        audio_file_obj.name = "audio.wav"

        transcription = client.audio.transcriptions.create(
            model="whisper-1", file=audio_file_obj
        )
        return transcription.text
    except Exception as e:
        return f"Error transcribing audio: {str(e)}"

def process_uploaded_audio(audio_binary):
    if not audio_binary:
        return "Please upload an audio file first."
    if not api_key:
        return "Please enter your OpenAI API key first."

    # transcribe_audio already catches and reports its own errors.
    return transcribe_audio(audio_binary, api_key)

def process_recorded_audio(audio_path):
    if not audio_path:
        return "No audio recorded."
    if not api_key:
        return "Please enter your OpenAI API key first."

    try:
        with open(audio_path, "rb") as audio_file:
            audio_binary = audio_file.read()
        return transcribe_audio(audio_binary, api_key)
    except Exception as e:
        return f"Error transcribing recorded audio: {str(e)}"

def process_voice_query(transcription, temperature, top_p, max_output_tokens):
    # Guard against passing an error/status message through as a prompt.
    if not transcription or transcription.startswith("Error") or transcription.startswith("Please"):
        return "Please ensure audio is transcribed successfully first."

    messages = [{"role": "user", "content": [{"type": "text", "text": transcription}]}]
    return query_openai(messages, temperature, top_p, max_output_tokens)

def clear_chat():
    # One value per component in clear_button's outputs list, in order.
    return (
        "", "", "",      # Image URL tab: URL, query, response
        "", "",          # Text tab: query, response
        None, "", "",    # Image tab: file, query, response
        None, "", "",    # PDF tab: file, query, response
        None, "", "",    # Upload Audio tab: file, transcription, response
        None, "", "",    # Record Audio tab: recording, transcription, response
        1.0, 1.0, 2048,  # Sliders back to their defaults
    )

with gr.Blocks() as demo:
    gr.Markdown("## GPT-4.1 Chatbot")

with gr.Accordion("How to Use This App!", open=False): |
|
gr.Markdown(""" |
|
### Getting Started: |
|
1. Enter your OpenAI API key in the field at the top and click "Set API Key" |
|
2. Adjust the hyperparameters if needed (Temperature, Top-P, Max Output Tokens) |
|
|
|
### Using the Different Tabs: |
|
|
|
#### Image URL Chat |
|
- Paste an image URL in the field |
|
- Enter your question about the image |
|
- Click "Ask" to get a response |
|
|
|
#### Text Chat |
|
- Simply type your query in the text field |
|
- Click "Ask" to get a response |
|
|
|
#### Image Chat |
|
- Upload an image from your device |
|
- Enter your question about the uploaded image |
|
- Click "Ask" to get a response |
|
|
|
#### PDF Chat |
|
- Upload a PDF document |
|
- Ask questions about the PDF content |
|
- Click "Ask" to get a response |
|
|
|
#### Voice Chat |
|
- **Upload Audio:** Upload an audio file, click "Transcribe Audio", then click "Ask" |
|
- **Record Audio:** Record your voice, click "Transcribe Recording", then click "Ask" |
|
|
|
### Tips: |
|
- Use the "Clear Chat" button to reset all fields |
|
- For more creative responses, try increasing the Temperature |
|
- For longer responses, increase the Max Output Tokens |
|
""") |
|
|
|
|
|
with gr.Accordion("Hyperparameters", open=False): |
|
gr.Markdown(""" |
|
### Temperature: |
|
Controls the randomness of the model's output. A lower temperature makes the model more deterministic, while a higher temperature makes it more creative and varied. |
|
### Top-P (Nucleus Sampling): |
|
Controls the cumulative probability distribution from which the model picks the next word. A lower value makes the model more focused and deterministic, while a higher value increases randomness. |
|
### Max Output Tokens: |
|
Limits the number of tokens (words or subwords) the model can generate in its response. You can use this to control the length of the response. |
|
""") |
|
|
|
    gr.HTML("""
    <style>
        #api_key_button {
            margin-top: 27px;  /* Align the button with the adjacent textbox */
            background: linear-gradient(135deg, #4a00e0 0%, #8e2de2 100%);  /* Purple gradient */
        }
        #api_key_button:hover {
            background: linear-gradient(135deg, #5b10f1 0%, #9f3ef3 100%);  /* Slightly lighter */
        }
        #clear_chat_button {
            background: linear-gradient(135deg, #e53e3e 0%, #f56565 100%);  /* Red gradient */
        }
        #clear_chat_button:hover {
            background: linear-gradient(135deg, #c53030 0%, #e53e3e 100%);  /* Darker red on hover */
        }
        #ask_button {
            background: linear-gradient(135deg, #fbd38d 0%, #f6e05e 100%);  /* Yellow gradient */
        }
        #ask_button:hover {
            background: linear-gradient(135deg, #ecc94b 0%, #fbd38d 100%);  /* Darker yellow on hover */
        }
        #transcribe_button {
            background: linear-gradient(135deg, #68d391 0%, #48bb78 100%);  /* Green gradient */
        }
        #transcribe_button:hover {
            background: linear-gradient(135deg, #38a169 0%, #68d391 100%);  /* Darker green on hover */
        }
    </style>
    """)

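    # The elem_id values on the buttons below are what the CSS selectors above
    # (e.g. #api_key_button, #ask_button) target.
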
    with gr.Row():
        api_key_input = gr.Textbox(label="Enter OpenAI API Key", type="password")
        api_key_button = gr.Button("Set API Key", elem_id="api_key_button")
        api_key_output = gr.Textbox(label="API Key Status", interactive=False)

    with gr.Row():
        temperature = gr.Slider(0, 2, value=1.0, step=0.1, label="Temperature")
        top_p = gr.Slider(0, 1, value=1.0, step=0.1, label="Top-P")
        max_output_tokens = gr.Slider(0, 16384, value=2048, step=512, label="Max Output Tokens")

    with gr.Tabs():
        with gr.Tab("Image URL Chat"):
            image_url = gr.Textbox(label="Enter Image URL")
            image_query = gr.Textbox(label="Ask about the Image")
            image_url_output = gr.Textbox(label="Response", interactive=False)
            image_url_button = gr.Button("Ask", elem_id="ask_button")

        with gr.Tab("Text Chat"):
            text_query = gr.Textbox(label="Enter your query")
            text_output = gr.Textbox(label="Response", interactive=False)
            text_button = gr.Button("Ask", elem_id="ask_button")

        with gr.Tab("Image Chat"):
            image_upload = gr.File(label="Upload an Image", type="filepath")
            image_text_query = gr.Textbox(label="Ask about the uploaded image")
            image_output = gr.Textbox(label="Response", interactive=False)
            image_button = gr.Button("Ask", elem_id="ask_button")

        with gr.Tab("PDF Chat"):
            pdf_upload = gr.File(label="Upload a PDF", type="filepath")
            pdf_text_query = gr.Textbox(label="Ask about the uploaded PDF")
            pdf_output = gr.Textbox(label="Response", interactive=False)
            pdf_button = gr.Button("Ask", elem_id="ask_button")

        with gr.Tab("Voice Chat"):
            with gr.Tabs():
                with gr.Tab("Upload Audio"):
                    audio_upload = gr.File(label="Upload an Audio File", type="binary")
                    upload_transcribe_button = gr.Button("Transcribe Audio", elem_id="transcribe_button")
                    upload_transcription = gr.Textbox(label="Transcription", interactive=False)
                    upload_audio_output = gr.Textbox(label="Response", interactive=False)
                    upload_audio_button = gr.Button("Ask", elem_id="ask_button")

                with gr.Tab("Record Audio"):
                    audio_recorder = gr.Audio(label="Record your voice", type="filepath")
                    record_transcribe_button = gr.Button("Transcribe Recording", elem_id="transcribe_button")
                    record_transcription = gr.Textbox(label="Transcription", interactive=False)
                    record_audio_output = gr.Textbox(label="Response", interactive=False)
                    record_audio_button = gr.Button("Ask", elem_id="ask_button")

    clear_button = gr.Button("Clear Chat", elem_id="clear_chat_button")

    api_key_button.click(set_api_key, inputs=[api_key_input], outputs=[api_key_output])
    image_url_button.click(image_url_chat, [image_url, image_query, temperature, top_p, max_output_tokens], image_url_output)
    text_button.click(text_chat, [text_query, temperature, top_p, max_output_tokens], text_output)
    image_button.click(image_chat, [image_upload, image_text_query, temperature, top_p, max_output_tokens], image_output)
    pdf_button.click(pdf_chat, [pdf_upload, pdf_text_query, temperature, top_p, max_output_tokens], pdf_output)

    # Voice chat is a two-step flow: transcribe first, then send the
    # transcription as a text query.
    upload_transcribe_button.click(
        process_uploaded_audio,
        inputs=[audio_upload],
        outputs=[upload_transcription],
    )
    upload_audio_button.click(
        process_voice_query,
        inputs=[upload_transcription, temperature, top_p, max_output_tokens],
        outputs=[upload_audio_output],
    )
    record_transcribe_button.click(
        process_recorded_audio,
        inputs=[audio_recorder],
        outputs=[record_transcription],
    )
    record_audio_button.click(
        process_voice_query,
        inputs=[record_transcription, temperature, top_p, max_output_tokens],
        outputs=[record_audio_output],
    )

    # Resets every field; the order here must match clear_chat's return tuple.
    clear_button.click(
        clear_chat,
        outputs=[
            image_url, image_query, image_url_output,
            text_query, text_output,
            image_upload, image_text_query, image_output,
            pdf_upload, pdf_text_query, pdf_output,
            audio_upload, upload_transcription, upload_audio_output,
            audio_recorder, record_transcription, record_audio_output,
            temperature, top_p, max_output_tokens,
        ],
    )

if __name__ == "__main__":
    demo.launch()
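
# Run with `python app.py` and open the local URL Gradio prints. To expose a
# temporary public link instead, Gradio supports demo.launch(share=True).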