import os
import json
import time

import streamlit as st
from streamlit_option_menu import option_menu
from PIL import Image

from gemini_utility import (load_gemini_pro, gemini_pro_vision_responce)

# Page configuration
st.set_page_config(
    page_title="GnosticDev AI",
    page_icon="🤖",
    layout="centered",
    initial_sidebar_state="expanded",
)


def translate_role_to_streamlit(role):
    """Translate Gemini message roles to Streamlit chat roles."""
    role_mapping = {
        'model': 'assistant',
        'user': 'user'
    }
    return role_mapping.get(role, 'assistant')


# File used for permanent storage of the prompts
PROMPTS_FILE = "training_prompts.json"


# Helpers for the permanent storage
def load_prompts():
    """Load the saved prompts from disk, falling back to an empty structure."""
    try:
        if os.path.exists(PROMPTS_FILE):
            with open(PROMPTS_FILE, 'r', encoding='utf-8') as file:
                return json.load(file)
    except Exception as e:
        st.error(f"Error cargando prompts: {e}")
    return {"prompts": [], "current_prompt": ""}


def save_prompts(prompts_data):
    """Persist the prompts structure to disk."""
    try:
        with open(PROMPTS_FILE, 'w', encoding='utf-8') as file:
            json.dump(prompts_data, file, ensure_ascii=False, indent=2)
    except Exception as e:
        st.error(f"Error guardando prompts: {e}")


# Data initialization
if 'prompts_data' not in st.session_state:
    st.session_state.prompts_data = load_prompts()

with st.sidebar:
    selected = option_menu(
        "GD AI",
        ["System Prompts", "Chatbot", "Image Captioning"],
        menu_icon="robot",
        icons=['gear', 'chat-dots-fill', 'image-fill'],
        default_index=0
    )

if selected == "System Prompts":
    st.title("Gestión de System Prompts")

    # Text area for a new prompt
    new_prompt = st.text_area(
        "Nuevo System Prompt",
        value="",
        height=200,
        help="Escribe aquí las nuevas instrucciones para el AI"
    )

    col1, col2 = st.columns([1, 2])
    with col1:
        if st.button("Añadir Nuevo Prompt"):
            if new_prompt and new_prompt not in st.session_state.prompts_data["prompts"]:
                st.session_state.prompts_data["prompts"].append(new_prompt)
                save_prompts(st.session_state.prompts_data)
                st.success("Nuevo prompt añadido!")
                time.sleep(1)
                st.rerun()

    # List of saved prompts
    st.markdown("### Prompts Guardados")
    for i, prompt in enumerate(st.session_state.prompts_data["prompts"]):
        with st.expander(f"Prompt {i+1}"):
            st.text_area(
                "Prompt guardado", prompt, height=100,
                key=f"prompt_{i}", disabled=True, label_visibility="collapsed"
            )
            col1, col2, col3 = st.columns([1, 1, 1])
            with col1:
                if st.button("Usar este prompt", key=f"use_{i}"):
                    st.session_state.prompts_data["current_prompt"] = prompt
                    save_prompts(st.session_state.prompts_data)
                    # Reset the chat so the new prompt takes effect in the next session
                    if "chat_session" in st.session_state:
                        del st.session_state.chat_session
                    st.success("Prompt activado!")
            with col2:
                if st.button("Editar", key=f"edit_{i}"):
                    st.session_state.editing_prompt = i
                    st.session_state.editing_text = prompt
            with col3:
                if st.button("Eliminar", key=f"delete_{i}"):
                    st.session_state.prompts_data["prompts"].pop(i)
                    save_prompts(st.session_state.prompts_data)
                    st.success("Prompt eliminado!")
                    time.sleep(1)
                    st.rerun()

    # Edit form, shown after a prompt was selected with "Editar"
    if "editing_prompt" in st.session_state:
        idx = st.session_state.editing_prompt
        edited_text = st.text_area(
            f"Editando Prompt {idx + 1}",
            st.session_state.editing_text,
            height=150,
            key="editing_area"
        )
        if st.button("Guardar cambios"):
            st.session_state.prompts_data["prompts"][idx] = edited_text
            save_prompts(st.session_state.prompts_data)
            del st.session_state.editing_prompt
            del st.session_state.editing_text
            st.success("Prompt actualizado!")
            time.sleep(1)
            st.rerun()

    # Show the currently active prompt
    st.markdown("### Prompt Actual")
    current_prompt = st.session_state.prompts_data.get("current_prompt", "")
    if current_prompt:
        st.info(current_prompt)
    else:
        st.warning("No hay prompt activo")

elif selected == "Chatbot":
    model = load_gemini_pro()

    # Initialize the chat session with the current prompt
    if "chat_session" not in st.session_state:
        st.session_state.chat_session = model.start_chat(history=[])

        # Send the current prompt silently (without showing it in the interface)
        current_prompt = st.session_state.prompts_data.get("current_prompt")
        if current_prompt:
            st.session_state.chat_session.send_message(current_prompt)

        # Show a welcome message when the chat starts
        try:
            welcome_message = "¡Hola! Soy GnosticDev AI. ¿En qué puedo ayudarte hoy?"
            with st.chat_message("assistant"):
                st.markdown(welcome_message)
        except Exception as e:
            st.error(f"Error al mostrar mensaje de bienvenida: {str(e)}")

    st.title("Gnosticdev Chatbot")

    # Display the chat history
    if hasattr(st.session_state.chat_session, 'history'):
        for message in st.session_state.chat_session.history:
            # Skip the system prompt itself so it never shows up in the chat
            if message.parts[0].text == st.session_state.prompts_data.get("current_prompt"):
                continue
            role = translate_role_to_streamlit(message.role)
            with st.chat_message(role):
                st.markdown(message.parts[0].text)

    # Chat input
    user_prompt = st.chat_input("Pregúntame algo...")
    if user_prompt:
        st.chat_message("user").markdown(user_prompt)
        try:
            gemini_response = st.session_state.chat_session.send_message(user_prompt)
            with st.chat_message("assistant"):
                st.markdown(gemini_response.text)
        except Exception as e:
            st.error(f"Error en la respuesta: {str(e)}")

elif selected == "Image Captioning":
    st.title("Image Caption Generation 📷")

    # Optional custom prompt for the caption
    use_custom_prompt = st.checkbox("Usar prompt personalizado")
    if use_custom_prompt:
        custom_prompt = st.text_area(
            "Escribe tu prompt personalizado",
            value="Write a caption for this image",
            help="Puedes personalizar las instrucciones para el análisis de la imagen"
        )

    upload_image = st.file_uploader("Upload an image...", type=["jpg", "jpeg", "png"])

    if upload_image:
        try:
            image = Image.open(upload_image)
            col1, col2 = st.columns(2)
            with col1:
                st.image(image, caption="Uploaded Image", use_column_width=True)
                st.text(f"Dimensiones: {image.size[0]}x{image.size[1]}")

            if st.button("Generate", key="generate_caption"):
                with st.spinner("Generando descripción..."):
                    prompt = custom_prompt if use_custom_prompt else "Write a caption for this image"
                    try:
                        # Keep the caption in session state so the save button
                        # below still works after Streamlit reruns the script
                        st.session_state.last_caption = gemini_pro_vision_responce(prompt, image)
                    except Exception as e:
                        st.error(f"Error al generar la descripción: {str(e)}")
                        st.error("Por favor, intenta con otra imagen o revisa tu conexión")

            if st.session_state.get("last_caption"):
                with col2:
                    st.success("¡Descripción generada!")
                    st.info(st.session_state.last_caption)

                # Option to save the caption. This lives outside the "Generate"
                # branch: a button nested inside another button's handler never
                # fires, because the outer button resets to False on the rerun.
                if st.button("Guardar descripción"):
                    timestamp = time.strftime("%Y%m%d-%H%M%S")
                    filename = f"caption_{timestamp}.txt"
                    try:
                        with open(filename, "w", encoding="utf-8") as f:
                            f.write(st.session_state.last_caption)
                        st.success(f"Descripción guardada en {filename}")
                    except Exception as e:
                        st.error(f"Error al guardar la descripción: {str(e)}")
        except Exception as e:
            st.error(f"Error al procesar la imagen: {str(e)}")
            st.error("Por favor, asegúrate de que el archivo es una imagen válida")

# Footer
st.markdown("---")
col1, col2, col3 = st.columns(3)
with col1:
    st.markdown("**GnosticDev AI**")
with col2:
    st.markdown("Versión 1.0.0")
with col3:
    st.markdown("Made with ❤️ by GnosticDev")
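
# ---------------------------------------------------------------------------
# For reference only: the companion gemini_utility module is not part of this
# file, so the sketch below is a hypothetical implementation of the two helpers
# imported at the top (load_gemini_pro and gemini_pro_vision_responce), written
# against the google-generativeai SDK. The GOOGLE_API_KEY variable name and the
# "gemini-pro" / "gemini-pro-vision" model names are assumptions, not taken
# from this repository. It is kept commented out so it does not shadow the
# real imports.
#
#   import os
#   import google.generativeai as genai
#
#   # Configure the SDK once with the API key (assumed env var name)
#   genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
#
#   def load_gemini_pro():
#       # Text-only chat model used by the "Chatbot" section
#       return genai.GenerativeModel("gemini-pro")
#
#   def gemini_pro_vision_responce(prompt, image):
#       # Multimodal model used by the "Image Captioning" section;
#       # accepts a text prompt plus a PIL image and returns the caption text
#       model = genai.GenerativeModel("gemini-pro-vision")
#       response = model.generate_content([prompt, image])
#       return response.text
# ---------------------------------------------------------------------------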