import os
import streamlit as st
import json
from streamlit_option_menu import option_menu
from gemini_utility import (load_gemini_pro, gemini_pro_vision_responce)
from PIL import Image
import time

# Page configuration (must run before any other Streamlit call)
st.set_page_config(
    page_title="GnosticDev AI",
    page_icon="🤖",  # restored: original emoji was mojibake ("馃") from a UTF-8 mis-decode
    layout="centered",
    initial_sidebar_state="expanded",
)

# JSON file used as permanent storage for the system prompts
PROMPTS_FILE = "training_prompts.json"

# Funciones para manejar el almacenamiento permanente
def load_prompts():
    """Load persisted prompts from PROMPTS_FILE.

    Returns:
        dict: always contains "prompts" (list of saved prompt strings) and
        "current_prompt" (str, the active prompt or "" when none).
        Missing keys in a partial/hand-edited file are filled with defaults
        so downstream indexing (e.g. data["prompts"]) cannot raise KeyError.
    """
    defaults = {"prompts": [], "current_prompt": ""}
    try:
        if os.path.exists(PROMPTS_FILE):
            with open(PROMPTS_FILE, 'r', encoding='utf-8') as file:
                data = json.load(file)
            if isinstance(data, dict):
                # Merge over defaults: tolerate files missing either key.
                return {**defaults, **data}
    except Exception as e:
        # Best-effort load: surface the error in the UI and fall back to defaults.
        st.error(f"Error cargando prompts: {e}")
    return defaults

def save_prompts(prompts_data):
    """Persist the prompts dictionary to PROMPTS_FILE as UTF-8 JSON.

    Errors are reported in the UI rather than raised, so a failed save
    never crashes the app.
    """
    try:
        serialized = json.dumps(prompts_data, ensure_ascii=False, indent=2)
        with open(PROMPTS_FILE, "w", encoding="utf-8") as fh:
            fh.write(serialized)
    except Exception as err:
        st.error(f"Error guardando prompts: {err}")

# One-time per-session initialization: pull persisted prompts into session state.
if 'prompts_data' not in st.session_state:
    st.session_state.prompts_data = load_prompts()

# Sidebar navigation. Item order must match the icon order below.
_MENU_ITEMS = ["System Prompts", "Chatbot", "Image Captioning"]
_MENU_ICONS = ['gear', 'chat-dots-fill', 'image-fill']

with st.sidebar:
    selected = option_menu(
        "GD AI",
        _MENU_ITEMS,
        menu_icon="robot",
        icons=_MENU_ICONS,
        default_index=0,
    )

if selected == "System Prompts":
    st.title("Gestión de System Prompts")

    # Edit form — shown when the user clicked "Editar" on a saved prompt.
    # (Previously "Editar" set session state that nothing consumed.)
    if "editing_prompt" in st.session_state:
        edit_index = st.session_state.editing_prompt
        edited_text = st.text_area(
            "Editar Prompt",
            value=st.session_state.get("editing_text", ""),
            height=200,
            key="edit_prompt_area",
        )
        save_col, cancel_col = st.columns([1, 1])
        with save_col:
            if st.button("Guardar cambios"):
                prompts = st.session_state.prompts_data["prompts"]
                if 0 <= edit_index < len(prompts):
                    prompts[edit_index] = edited_text
                    save_prompts(st.session_state.prompts_data)
                del st.session_state.editing_prompt
                st.session_state.pop("editing_text", None)
                st.rerun()
        with cancel_col:
            if st.button("Cancelar"):
                del st.session_state.editing_prompt
                st.session_state.pop("editing_text", None)
                st.rerun()

    # Area for a new prompt
    new_prompt = st.text_area(
        "Nuevo System Prompt",
        value="",
        height=200,
        help="Escribe aquí las nuevas instrucciones para el AI"
    )

    col1, col2 = st.columns([1, 2])
    with col1:
        if st.button("Añadir Nuevo Prompt"):
            # Ignore empty/duplicate prompts.
            if new_prompt and new_prompt not in st.session_state.prompts_data["prompts"]:
                st.session_state.prompts_data["prompts"].append(new_prompt)
                save_prompts(st.session_state.prompts_data)
                st.success("Nuevo prompt añadido!")
                time.sleep(1)  # let the success toast be visible before rerun
                st.rerun()

    # List of saved prompts
    st.markdown("### Prompts Guardados")
    for i, prompt in enumerate(st.session_state.prompts_data["prompts"]):
        with st.expander(f"Prompt {i+1}"):
            # Read-only view; a non-empty label is required by newer Streamlit,
            # so label it and hide it visually.
            st.text_area(
                f"Prompt {i+1}",
                prompt,
                height=100,
                key=f"prompt_{i}",
                disabled=True,
                label_visibility="collapsed",
            )
            col1, col2, col3 = st.columns([1, 1, 1])
            with col1:
                if st.button("Usar este prompt", key=f"use_{i}"):
                    st.session_state.prompts_data["current_prompt"] = prompt
                    save_prompts(st.session_state.prompts_data)
                    # Drop the live chat so the next session starts with the new prompt.
                    if "chat_session" in st.session_state:
                        del st.session_state.chat_session
                    st.success("Prompt activado!")
            with col2:
                if st.button("Editar", key=f"edit_{i}"):
                    st.session_state.editing_prompt = i
                    st.session_state.editing_text = prompt
                    st.rerun()
            with col3:
                if st.button("Eliminar", key=f"delete_{i}"):
                    st.session_state.prompts_data["prompts"].pop(i)
                    save_prompts(st.session_state.prompts_data)
                    st.success("Prompt eliminado!")
                    time.sleep(1)
                    st.rerun()

    # Show the currently active prompt
    st.markdown("### Prompt Actual")
    current_prompt = st.session_state.prompts_data.get("current_prompt", "")
    if current_prompt:
        st.info(current_prompt)
    else:
        st.warning("No hay prompt activo")

elif selected == "Chatbot":
    model = load_gemini_pro()
    
    # Initialize chat session with current prompt
    if "chat_session" not in st.session_state:
        st.session_state.chat_session = model.start_chat(history=[])
        current_prompt = st.session_state.prompts_data.get("current_prompt")
        if current_prompt:
            st.session_state.chat_session.send_message(current_prompt)

    st.title("Gnosticdev Chatbot")
    
    # Mostrar prompt actual
    current_prompt = st.session_state.prompts_data.get("current_prompt")
    if current_prompt:
        with st.expander("Ver System Prompt actual"):
            st.info(current_prompt)
    
    # Display chat history
    for message in st.session_state.chat_session.history:
        with st.chat_message(translate_role_to_streamlit(message.role)):
            st.markdown(message.parts[0].text)

    # Chat input
    user_prompt = st.chat_input("Preguntame algo...")
    if user_prompt:
        st.chat_message("user").markdown(user_prompt)
        gemini_response = st.session_state.chat_session.send_message(user_prompt)
        with st.chat_message("assistant"):
            st.markdown(gemini_response.text)

elif selected == "Image Captioning":
    st.title("Image Caption Generation馃摳")
    upload_image = st.file_uploader("Upload an image...", type=["jpg", "jpeg", "png"])
    if upload_image and st.button("Generate"):
        image = Image.open(upload_image)
        col1, col2 = st.columns(2)
        with col1:
            st.image(image, caption="Uploaded Image", use_column_width=True)
        default_prompt = "Write a caption for this image"
        caption = gemini_pro_vision_responce(default_prompt, image)
        with col2:
            st.info(caption)