# ModalChat / app.py
import gradio as gr
import google.generativeai as genai
from PIL import Image
import os
import io
# Configure the Gemini API
# Read the API key from the environment; set GOOGLE_API_KEY before launching the app.
genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
# Set up the model
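# Sampling settings: temperature, top_p, and top_k control response randomness,
# and max_output_tokens caps the length of each reply.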
generation_config = {
    "temperature": 1,
    "top_p": 0.95,
    "top_k": 64,
    "max_output_tokens": 8192,
    "response_mime_type": "text/plain",
}

model = genai.GenerativeModel(
    model_name="gemini-1.5-flash",
    generation_config=generation_config,
)
def image_to_byte_array(image: Image.Image) -> bytes:
    # Encode as JPEG to match the image/jpeg MIME type sent to the API;
    # Gradio-supplied PIL images often have no format set, so re-encode explicitly.
    img_byte_arr = io.BytesIO()
    image.convert("RGB").save(img_byte_arr, format="JPEG")
    return img_byte_arr.getvalue()
def chat_with_gemini(history, user_message, image):
    history = history or []
    try:
        if image is not None:
            # Attach the uploaded image to the prompt as inline JPEG data.
            image_bytes = image_to_byte_array(image)
            image_parts = [{"mime_type": "image/jpeg", "data": image_bytes}]
            prompt_parts = [user_message] + image_parts
        else:
            prompt_parts = [user_message]
        # Generate a response and append the (user, bot) turn to the history.
        response = model.generate_content(prompt_parts)
        history.append((user_message, response.text))
    except Exception as e:
        history.append((user_message, f"An error occurred: {e}"))
    return history
def clear_conversation():
    # Returning None clears the chatbot history.
    return None
# Define the Gradio interface
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Chat with Gemini 1.5 Flash")
    msg = gr.Textbox(label="Type your message here")
    clear = gr.Button("Clear")
    image_upload = gr.Image(type="pil", label="Upload an image (optional)")

    # Submitting the textbox sends the message (and optional image) to the model.
    msg.submit(chat_with_gemini, [chatbot, msg, image_upload], [chatbot])
    clear.click(clear_conversation, outputs=[chatbot])
# Launch the app
if __name__ == "__main__":
    demo.launch()
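# Usage sketch (assumptions: the gradio and google-generativeai packages are
# installed, and GOOGLE_API_KEY holds a valid Gemini API key):
#
#   export GOOGLE_API_KEY="your-key-here"
#   python app.py
#
# Gradio then serves the app locally (http://127.0.0.1:7860 by default).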