import gradio as gr
import openai
import fitz # PyMuPDF for PDF processing
import base64
import io
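
# NOTE: this script targets the legacy (pre-1.0) openai Python SDK, which
# exposes module-level entry points such as openai.api_key, openai.ChatCompletion,
# and openai.Audio. Those were removed in openai>=1.0 in favor of a client
# object; reference sketches of the newer calls appear in comments below.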

# Variable to store API key
api_key = ""

# Function to update API key
def set_api_key(key):
    global api_key
    api_key = key
    return "API Key Set Successfully!"

# Function to interact with OpenAI API
def query_openai(messages, temperature, top_p, max_output_tokens):
    if not api_key:
        return "Please enter your OpenAI API key first."
    try:
        openai.api_key = api_key  # Set API key dynamically

        # Ensure numeric values for the OpenAI parameters. Checking against
        # None (rather than truthiness) keeps a deliberate temperature or
        # top_p of 0 intact; a max_output_tokens of 0 is invalid anyway, so
        # it falls back to the default.
        temperature = float(temperature) if temperature is not None else 1.0
        top_p = float(top_p) if top_p is not None else 1.0
        max_output_tokens = int(max_output_tokens) if max_output_tokens else 2048

        response = openai.ChatCompletion.create(
            model="gpt-4.5-preview",
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_output_tokens,
        )
        return response["choices"][0]["message"]["content"]
    except Exception as e:
        return f"Error: {str(e)}"

# Function to process image URL input
def image_url_chat(image_url, text_query, temperature, top_p, max_output_tokens):
    if not image_url or not text_query:
        return "Please provide an image URL and a query."

    messages = [
        {"role": "user", "content": [
            {"type": "image_url", "image_url": {"url": image_url}},
            {"type": "text", "text": text_query},
        ]},
    ]
    return query_openai(messages, temperature, top_p, max_output_tokens)

# Function to process text input
def text_chat(text_query, temperature, top_p, max_output_tokens):
    if not text_query:
        return "Please enter a query."
    messages = [{"role": "user", "content": [{"type": "text", "text": text_query}]}]
    return query_openai(messages, temperature, top_p, max_output_tokens)

# Function to process uploaded image input
def image_chat(image_file, text_query, temperature, top_p, max_output_tokens):
    if image_file is None or not text_query:
        return "Please upload an image and provide a query."

    # Encode the image as base64 and wrap it in a data URL. Note that the
    # MIME type is assumed to be JPEG regardless of the actual upload; see
    # the sketch below for detecting it instead.
    with open(image_file, "rb") as img:
        base64_image = base64.b64encode(img.read()).decode("utf-8")
    image_data = f"data:image/jpeg;base64,{base64_image}"

    messages = [
        {"role": "user", "content": [
            {"type": "image_url", "image_url": {"url": image_data}},
            {"type": "text", "text": text_query},
        ]},
    ]
    return query_openai(messages, temperature, top_p, max_output_tokens)
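
# A minimal sketch (not wired in above) of deriving the data-URL MIME type
# from the uploaded file instead of hard-coding JPEG, using only the standard
# library:
#
#   import mimetypes
#   mime_type, _ = mimetypes.guess_type(image_file)
#   image_data = f"data:{mime_type or 'image/jpeg'};base64,{base64_image}"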

# Function to process uploaded PDF input
def pdf_chat(pdf_file, text_query, temperature, top_p, max_output_tokens):
    if pdf_file is None or not text_query:
        return "Please upload a PDF and provide a query."
    try:
        # gr.File(type="filepath") supplies a path string, so open it directly
        doc = fitz.open(pdf_file)
        text = "\n".join(page.get_text("text") for page in doc)  # Extract text from all pages

        # If no text was found, return an error
        if not text.strip():
            return "No text found in the PDF."

        # Create the query message from the extracted text and the user's query
        messages = [
            {"role": "user", "content": [
                {"type": "text", "text": text},  # The extracted text from the PDF
                {"type": "text", "text": text_query},
            ]},
        ]
        return query_openai(messages, temperature, top_p, max_output_tokens)
    except Exception as e:
        return f"Error processing the PDF: {str(e)}"

# Function to transcribe audio to text using OpenAI Whisper API
def transcribe_audio(audio_binary, openai_api_key):
    if not openai_api_key:
        return "Error: No API key provided."
    openai.api_key = openai_api_key
    try:
        # Wrap the raw bytes in a file-like object; OpenAI expects a named file
        audio_file_obj = io.BytesIO(audio_binary)
        audio_file_obj.name = "audio.wav"

        # Transcribe the audio to text using OpenAI's whisper-1 model
        audio_file_transcription = openai.Audio.transcribe(file=audio_file_obj, model="whisper-1")
        return audio_file_transcription.text
    except Exception as e:
        return f"Error transcribing audio: {str(e)}"

# Function to handle uploaded audio transcription
def process_uploaded_audio(audio_binary):
    if not audio_binary:
        return "Please upload an audio file first."
    if not api_key:
        return "Please enter your OpenAI API key first."
    try:
        transcription = transcribe_audio(audio_binary, api_key)
        return transcription
    except Exception as e:
        return f"Error transcribing audio: {str(e)}"

# Function to handle recorded audio transcription
def process_recorded_audio(audio_path):
    if not audio_path:
        return "No audio recorded."
    if not api_key:
        return "Please enter your OpenAI API key first."
    try:
        # gr.Audio(type="filepath") yields a path, so read the bytes from disk
        with open(audio_path, "rb") as audio_file:
            audio_binary = audio_file.read()
        transcription = transcribe_audio(audio_binary, api_key)
        return transcription
    except Exception as e:
        return f"Error transcribing recorded audio: {str(e)}"

# Function to process the voice chat queries
def process_voice_query(transcription, temperature, top_p, max_output_tokens):
    if not transcription or transcription.startswith("Error") or transcription.startswith("Please"):
        return "Please ensure audio is transcribed successfully first."

    # Use the transcription as the query
    messages = [{"role": "user", "content": [{"type": "text", "text": transcription}]}]
    return query_openai(messages, temperature, top_p, max_output_tokens)

# Function to clear the chat, returning the correct value type for each output
def clear_chat():
    # File components (gr.File, gr.Audio) reset to None, text components to "",
    # and sliders to their default values. The order must match the outputs
    # list in clear_button.click() exactly.
    return (
        "",    # image_url (textbox)
        "",    # image_query (textbox)
        "",    # image_url_output (textbox)
        "",    # text_query (textbox)
        "",    # text_output (textbox)
        "",    # image_text_query (textbox)
        "",    # image_output (textbox)
        None,  # pdf_upload (file)
        "",    # pdf_text_query (textbox)
        "",    # pdf_output (textbox)
        None,  # audio_upload (file)
        "",    # upload_transcription (textbox)
        "",    # upload_audio_output (textbox)
        None,  # audio_recorder (audio)
        "",    # record_transcription (textbox)
        "",    # record_audio_output (textbox)
        1.0,   # temperature (slider)
        1.0,   # top_p (slider)
        2048,  # max_output_tokens (slider)
    )

# Gradio UI Layout
with gr.Blocks() as demo:
    gr.Markdown("## GPT-4.5 Preview Chatbot")

    with gr.Accordion("How to Use This App!", open=False):
        gr.Markdown("""
        ### Getting Started:
        1. Enter your OpenAI API key in the field at the top and click "Set API Key"
        2. Adjust the hyperparameters if needed (Temperature, Top-P, Max Output Tokens)

        ### Using the Different Tabs:

        #### Image URL Chat
        - Paste an image URL in the field
        - Enter your question about the image
        - Click "Ask" to get a response

        #### Text Chat
        - Simply type your query in the text field
        - Click "Ask" to get a response

        #### Image Chat
        - Upload an image from your device
        - Enter your question about the uploaded image
        - Click "Ask" to get a response

        #### PDF Chat
        - Upload a PDF document
        - Ask questions about the PDF content
        - Click "Ask" to get a response

        #### Voice Chat
        - **Upload Audio:** Upload an audio file, click "Transcribe Audio", then click "Ask"
        - **Record Audio:** Record your voice, click "Transcribe Recording", then click "Ask"

        ### Tips:
        - Use the "Clear Chat" button to reset all fields
        - For more creative responses, try increasing the Temperature
        - For longer responses, increase the Max Output Tokens
        """)

    # Accordion for explaining hyperparameters
    with gr.Accordion("Hyperparameters", open=False):
        gr.Markdown("""
        ### Temperature:
        Controls the randomness of the model's output. A lower temperature makes the model more deterministic, while a higher temperature makes it more creative and varied.

        ### Top-P (Nucleus Sampling):
        Restricts sampling to the smallest set of tokens whose cumulative probability exceeds P. A lower value makes the model more focused and deterministic, while a higher value increases randomness.

        ### Max Output Tokens:
        Limits the number of tokens (words or subwords) the model can generate in its response. You can use this to control the length of the response.
        """)

    gr.HTML("""
    <style>
        #api_key_button {
            margin-top: 27px; /* Add margin-top to the button */
            background: linear-gradient(135deg, #4a00e0 0%, #8e2de2 100%); /* Purple gradient */
        }
        #api_key_button:hover {
            background: linear-gradient(135deg, #5b10f1 0%, #9f3ef3 100%); /* Slightly lighter */
        }
        #clear_chat_button {
            background: linear-gradient(135deg, #e53e3e 0%, #f56565 100%); /* Red gradient */
        }
        #clear_chat_button:hover {
            background: linear-gradient(135deg, #c53030 0%, #e53e3e 100%); /* Slightly darker red gradient on hover */
        }
        #ask_button {
            background: linear-gradient(135deg, #fbd38d 0%, #f6e05e 100%); /* Yellow gradient */
        }
        #ask_button:hover {
            background: linear-gradient(135deg, #ecc94b 0%, #fbd38d 100%); /* Slightly darker yellow gradient on hover */
        }
        #transcribe_button {
            background: linear-gradient(135deg, #68d391 0%, #48bb78 100%); /* Green gradient */
        }
        #transcribe_button:hover {
            background: linear-gradient(135deg, #38a169 0%, #68d391 100%); /* Slightly darker green gradient on hover */
        }
    </style>
    """)

    # API Key Input
    with gr.Row():
        api_key_input = gr.Textbox(label="Enter OpenAI API Key", type="password")
        api_key_button = gr.Button("Set API Key", elem_id="api_key_button")
    api_key_output = gr.Textbox(label="API Key Status", interactive=False)

    with gr.Row():
        temperature = gr.Slider(0, 2, value=1.0, step=0.1, label="Temperature")
        top_p = gr.Slider(0, 1, value=1.0, step=0.1, label="Top-P")
        max_output_tokens = gr.Slider(0, 16384, value=2048, step=512, label="Max Output Tokens")

    with gr.Tabs():
        with gr.Tab("Image URL Chat"):
            image_url = gr.Textbox(label="Enter Image URL")
            image_query = gr.Textbox(label="Ask about the Image")
            image_url_output = gr.Textbox(label="Response", interactive=False)
            image_url_button = gr.Button("Ask", elem_id="ask_button")

        with gr.Tab("Text Chat"):
            text_query = gr.Textbox(label="Enter your query")
            text_output = gr.Textbox(label="Response", interactive=False)
            text_button = gr.Button("Ask", elem_id="ask_button")

        with gr.Tab("Image Chat"):
            image_upload = gr.File(label="Upload an Image", type="filepath")
            image_text_query = gr.Textbox(label="Ask about the uploaded image")
            image_output = gr.Textbox(label="Response", interactive=False)
            image_button = gr.Button("Ask", elem_id="ask_button")

        with gr.Tab("PDF Chat"):
            pdf_upload = gr.File(label="Upload a PDF", type="filepath")
            pdf_text_query = gr.Textbox(label="Ask about the uploaded PDF")
            pdf_output = gr.Textbox(label="Response", interactive=False)
            pdf_button = gr.Button("Ask", elem_id="ask_button")

        with gr.Tab("Voice Chat"):
            with gr.Tabs():
                with gr.Tab("Upload Audio"):
                    # Upload audio section
                    audio_upload = gr.File(label="Upload an Audio File", type="binary")
                    upload_transcribe_button = gr.Button("Transcribe Audio", elem_id="transcribe_button")
                    upload_transcription = gr.Textbox(label="Transcription", interactive=False)
                    upload_audio_output = gr.Textbox(label="Response", interactive=False)
                    upload_audio_button = gr.Button("Ask", elem_id="ask_button")

                with gr.Tab("Record Audio"):
                    # Record audio section
                    audio_recorder = gr.Audio(label="Record your voice", type="filepath")
                    record_transcribe_button = gr.Button("Transcribe Recording", elem_id="transcribe_button")
                    record_transcription = gr.Textbox(label="Transcription", interactive=False)
                    record_audio_output = gr.Textbox(label="Response", interactive=False)
                    record_audio_button = gr.Button("Ask", elem_id="ask_button")

    # Clear chat button
    clear_button = gr.Button("Clear Chat", elem_id="clear_chat_button")

    # Button Click Actions
    api_key_button.click(set_api_key, inputs=[api_key_input], outputs=[api_key_output])
    image_url_button.click(image_url_chat, [image_url, image_query, temperature, top_p, max_output_tokens], image_url_output)
    text_button.click(text_chat, [text_query, temperature, top_p, max_output_tokens], text_output)
    image_button.click(image_chat, [image_upload, image_text_query, temperature, top_p, max_output_tokens], image_output)
    pdf_button.click(pdf_chat, [pdf_upload, pdf_text_query, temperature, top_p, max_output_tokens], pdf_output)

    # Voice Chat - Upload Audio tab actions
    upload_transcribe_button.click(
        process_uploaded_audio,
        inputs=[audio_upload],
        outputs=[upload_transcription],
    )
    upload_audio_button.click(
        process_voice_query,
        inputs=[upload_transcription, temperature, top_p, max_output_tokens],
        outputs=[upload_audio_output],
    )

    # Voice Chat - Record Audio tab actions
    record_transcribe_button.click(
        process_recorded_audio,
        inputs=[audio_recorder],
        outputs=[record_transcription],
    )
    record_audio_button.click(
        process_voice_query,
        inputs=[record_transcription, temperature, top_p, max_output_tokens],
        outputs=[record_audio_output],
    )

    # Clear button resets all necessary fields
    clear_button.click(
        clear_chat,
        outputs=[
            image_url, image_query, image_url_output,
            text_query, text_output,
            image_text_query, image_output,
            pdf_upload, pdf_text_query, pdf_output,
            audio_upload, upload_transcription, upload_audio_output,
            audio_recorder, record_transcription, record_audio_output,
            temperature, top_p, max_output_tokens,
        ],
    )

# Launch Gradio App
if __name__ == "__main__":
    demo.launch()
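
# A plausible local setup, assuming the legacy SDK pin described at the top of
# this file (openai 0.28.x was the last pre-1.0 release; fitz is provided by
# the PyMuPDF package):
#
#   pip install gradio "openai==0.28.1" PyMuPDF
#   python app.py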