import os

import streamlit as st
from streamlit_option_menu import option_menu
from PIL import Image

from gemini_utility import (
    load_gemini_pro,
    gemini_pro_vision_responce,
)

# Set the page config
st.set_page_config(
    page_title="GnosticDev AI",
    page_icon="🤖",
    layout="centered",
    initial_sidebar_state="expanded",
)

# Initialize the system prompt in session state if it does not exist yet
if "system_prompt" not in st.session_state:
    st.session_state.system_prompt = ""

with st.sidebar:
    selected = option_menu(
        "GD AI",
        ["System Prompt", "Chatbot", "Image Captioning"],
        menu_icon="robot",
        icons=['gear', 'chat-dots-fill', 'image-fill'],
        default_index=0,
    )


def translate_role_to_streamlit(user_role):
    """Map the Gemini role name to the one st.chat_message expects."""
    if user_role == "model":
        return "assistant"
    return user_role


if selected == "System Prompt":
    st.title("Configuración del System Prompt")

    # Text area for editing the system prompt
    new_system_prompt = st.text_area(
        "Ingresa las instrucciones para el AI (System Prompt)",
        value=st.session_state.system_prompt,
        height=300,
        help="Escribe aquí las instrucciones que definirán el comportamiento del AI",
    )

    if st.button("Guardar System Prompt"):
        st.session_state.system_prompt = new_system_prompt
        # Reset the chat session so the new prompt takes effect on the next chat
        if "chat_session" in st.session_state:
            del st.session_state.chat_session
        st.success("System Prompt actualizado con éxito!")

    # Show the current prompt
    if st.session_state.system_prompt:
        st.markdown("### System Prompt Actual:")
        st.info(st.session_state.system_prompt)

elif selected == "Chatbot":
    model = load_gemini_pro()

    # Initialize the chat session, seeding it with the system prompt if one is set
    if "chat_session" not in st.session_state:
        st.session_state.chat_session = model.start_chat(history=[])
        if st.session_state.system_prompt:
            st.session_state.chat_session.send_message(st.session_state.system_prompt)

    st.title("Gnosticdev Chatbot")

    # Show the current system prompt in an expander
    if st.session_state.system_prompt:
        with st.expander("Ver System Prompt actual"):
            st.info(st.session_state.system_prompt)

    # Display the chat history
    for message in st.session_state.chat_session.history:
        with st.chat_message(translate_role_to_streamlit(message.role)):
            st.markdown(message.parts[0].text)

    # Input field for the user's message
    user_prompt = st.chat_input("Preguntame algo...")
    if user_prompt:
        st.chat_message("user").markdown(user_prompt)
        gemini_response = st.session_state.chat_session.send_message(user_prompt)
        with st.chat_message("assistant"):
            st.markdown(gemini_response.text)

elif selected == "Image Captioning":
    st.title("Image Caption Generation📸")

    upload_image = st.file_uploader("Upload an image...", type=["jpg", "jpeg", "png"])

    if upload_image and st.button("Generate"):
        image = Image.open(upload_image)
        col1, col2 = st.columns(2)

        with col1:
            st.image(image, caption="Uploaded Image", use_column_width=True)

        default_prompt = "Write a caption for this image"
        caption = gemini_pro_vision_responce(default_prompt, image)

        with col2:
            st.info(caption)
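

# ---------------------------------------------------------------------------
# NOTE: the helpers imported from gemini_utility above (load_gemini_pro and
# gemini_pro_vision_responce) are not shown in this listing. The block below
# is a minimal, hypothetical sketch of what a separate gemini_utility.py
# could contain, assuming the google-generativeai SDK and a GOOGLE_API_KEY
# environment variable; the model names are assumptions and may need to be
# adjusted to whatever your account supports.
# ---------------------------------------------------------------------------

# --- contents of a hypothetical gemini_utility.py (separate file) ---

import os

import google.generativeai as genai

# Assumes the API key is provided via the GOOGLE_API_KEY environment variable
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])


def load_gemini_pro():
    # Text model used by the Chatbot tab; supports start_chat / send_message
    return genai.GenerativeModel("gemini-pro")


def gemini_pro_vision_responce(prompt, image):
    # Multimodal model used by the Image Captioning tab; `image` is a PIL image
    model = genai.GenerativeModel("gemini-pro-vision")
    response = model.generate_content([prompt, image])
    return response.text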