File size: 6,769 Bytes
846e270
 
9cf8e68
 
 
d4904e9
 
9cf8e68
846e270
72f42a0
9cf8e68
 
72f42a0
 
 
 
 
2bfa474
71f38ca
9cf8e68
71f38ca
9cf8e68
1897897
 
 
b3f1f98
1093cd0
1897897
e3a1f9f
1897897
e3a1f9f
 
 
b3f1f98
e3a1f9f
1897897
e3a1f9f
 
 
1897897
d4904e9
72f42a0
d4904e9
9cf8e68
2bfa474
 
72f42a0
7bcbfac
2bfa474
72f42a0
 
 
 
 
 
 
2bfa474
 
9cf8e68
72f42a0
 
 
 
 
 
 
 
 
9cf8e68
 
2bfa474
72f42a0
9cf8e68
 
72f42a0
 
 
b53f7b3
72f42a0
 
9cf8e68
2bfa474
9cf8e68
72f42a0
 
9cf8e68
 
 
 
72f42a0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1af1861
72f42a0
 
9cf8e68
2bfa474
9cf8e68
72f42a0
 
 
b9f1b65
2bfa474
72f42a0
 
 
 
 
8c1883c
 
 
 
 
72f42a0
8c1883c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72f42a0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
import os
from dotenv import find_dotenv, load_dotenv
import streamlit as st
from typing import Generator
from groq import Groq
import datetime
import json

# Load variables from a .env file (if one is found) into os.environ.
_ = load_dotenv(find_dotenv())
# Must be the first Streamlit call in the script (Streamlit requirement).
st.set_page_config(page_icon="💬", layout="wide", page_title="...")

def icon(emoji: str):
    """Render *emoji* as a large, Notion-style page icon."""
    markup = f'<span style="font-size: 78px; line-height: 1">{emoji}</span>'
    st.write(markup, unsafe_allow_html=True)

# Page icon and title.
icon("⚡")

st.subheader("Chatbot", divider="rainbow", anchor=False)

# LOGIN
# Expected login password; raises KeyError at startup if PASSWORD is unset.
password = os.environ['PASSWORD']

def login(password_input):
    """Streamlit button callback: authenticate the session.

    Sets ``st.session_state.authenticated`` when *password_input* matches the
    ``PASSWORD`` environment value; otherwise shows an error banner.
    """
    if password_input == password:
        st.session_state.authenticated = True
        return  # Exit the function after successful login

    # Fixed message: the app only asks for a password — there is no username
    # field, so the old "Invalid username or password" text was misleading.
    st.error("Invalid password")

login_container = st.container()

# Show the password form only while the session is not yet authenticated.
if "authenticated" not in st.session_state or not st.session_state.authenticated:
    with login_container:
        password_input = st.text_input("Password", type="password")
        # BUG FIX: Streamlit unpacks `args` into the callback (callback(*args)).
        # Passing the raw string unpacked it character-by-character, so `login`
        # received one positional argument per character — a TypeError for any
        # password not exactly one character long.  Wrap it in a 1-tuple.
        st.button("authenticate", on_click=login, args=(password_input,))

# Groq API client; requires GROQ_API_KEY in the environment.
client = Groq(
    api_key=os.environ['GROQ_API_KEY'],
)

# Initialize chat history and selected model
# (Streamlit reruns the script on every interaction; session_state persists.)
if "messages" not in st.session_state:
    st.session_state.messages = []

if "selected_model" not in st.session_state:
    st.session_state.selected_model = None

# prompts
# Canned user prompts selectable from the UI; "none" sends nothing.
prompts = {
    "none": "",
    "python interpreter": "emulate the output of this program like you are the python interpreter, only answer with the result of this emulation. Ask the user for each missing input, sequentially and only once per message, in the same way a python interpreter would. Do not fill in for my inputs. Take my inputs from the message directly after you ask for input."
}

# Define model details
# Keys are Groq model ids; "tokens" is each model's context-window size,
# used as the upper bound of the max-tokens slider below.
models = {
    "mixtral-8x7b-32768": {
        "name": "Mixtral-8x7b-Instruct-v0.1",
        "tokens": 32768,
        "developer": "Mistral",
    },
    "gemma-7b-it": {"name": "Gemma-7b-it", "tokens": 8192, "developer": "Google"},
    "llama2-70b-4096": {"name": "LLaMA2-70b-chat", "tokens": 4096, "developer": "Meta"},
    "llama3-70b-8192": {"name": "LLaMA3-70b-8192", "tokens": 8192, "developer": "Meta"},
    "llama3-8b-8192": {"name": "LLaMA3-8b-8192", "tokens": 8192, "developer": "Meta"},
}

# Layout for model selection, max-tokens slider, and canned-prompt picker
col1, col2, col3 = st.columns(3)

with col1:
    model_option = st.selectbox(
        "Choose a model:",
        options=list(models.keys()),
        # Show the curated display name; the previous identity lambda was a
        # no-op and left the `name` field in `models` unused.
        format_func=lambda x: models[x]["name"],
        index=0,  # Default to the first model in the list
    )

# Detect model change and clear chat history if model has changed
if st.session_state.selected_model != model_option:
    st.session_state.messages = []
    st.session_state.selected_model = model_option

# Upper bound for the response-length slider, per selected model.
max_tokens_range = models[model_option]["tokens"]

with col2:
    # Adjust max_tokens slider dynamically based on the selected model
    max_tokens = st.slider(
        "Max Tokens:",
        min_value=512,  # Minimum value to allow some flexibility
        max_value=max_tokens_range,
        # Default to the model's maximum, capped at 32768
        value=min(32768, max_tokens_range),
        step=512,
        help=f"Adjust the maximum number of tokens (words) for the model's response. Max for selected model: {max_tokens_range}",
    )

with col3:
    # (Fixed: this block previously used a non-standard 3-space indent.)
    prompt_selection = st.selectbox(
        "Choose a prompt:",
        options=list(prompts.keys()),
        index=0,
    )

# Replay the stored conversation on each script rerun.
for past in st.session_state.messages:
    role = past["role"]
    with st.chat_message(role, avatar="🧠" if role == "assistant" else "❓"):
        st.markdown(past["content"])

def generate_chat_responses(chat_completion) -> Generator[str, None, None]:
    """Yield every non-empty content delta from a streaming Groq response."""
    for part in chat_completion:
        delta = part.choices[0].delta.content
        if delta:
            yield delta

# Handle a new user message typed into the chat box.
if prompt := st.chat_input("Enter your prompt here..."):
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user", avatar="❓"):
        st.markdown(prompt)

    # BUG FIX: full_response was previously only assigned inside the try, so a
    # failing API call left it unbound and the isinstance check below raised
    # NameError.  Initialize it up front and skip persistence when it is None.
    full_response = None

    # Fetch response from Groq API
    try:
        chat_completion = client.chat.completions.create(
            model=model_option,
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            max_tokens=max_tokens,
            stream=True,
        )

        # Stream the reply into the UI; st.write_stream returns the full text.
        with st.chat_message("assistant", avatar="🧠"):
            chat_responses_generator = generate_chat_responses(chat_completion)
            full_response = st.write_stream(chat_responses_generator)
    except Exception as e:
        st.error(e, icon="🚨")

    # Persist the assistant reply (st.write_stream may return a str or a list).
    if isinstance(full_response, str):
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )
    elif full_response is not None:
        # Handle the case where full_response is a list of chunks
        combined_response = "\n".join(str(item) for item in full_response)
        st.session_state.messages.append(
            {"role": "assistant", "content": combined_response}
        )
        
# Inject the selected canned prompt (if any) as a user message.
# BUG FIX: Streamlit reruns this whole script on every interaction, so this
# block used to re-append the same prompt — and fire a fresh API call — on
# every rerun while a prompt stayed selected.  Track the last injected
# selection in session_state and only send when it changes.
if (prompt := prompts.get(prompt_selection)) and st.session_state.get(
    "last_injected_prompt"
) != prompt_selection:
    st.session_state.last_injected_prompt = prompt_selection
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("user", avatar="❓"):
        st.markdown(prompt)

    # BUG FIX: initialize full_response so a failed API call no longer leaves
    # it unbound (the isinstance check below raised NameError on failure).
    full_response = None

    # Fetch response from Groq API
    try:
        chat_completion = client.chat.completions.create(
            model=model_option,
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            max_tokens=max_tokens,
            stream=True,
        )

        # Stream the reply into the UI; st.write_stream returns the full text.
        with st.chat_message("assistant", avatar="🧠"):
            chat_responses_generator = generate_chat_responses(chat_completion)
            full_response = st.write_stream(chat_responses_generator)
    except Exception as e:
        st.error(e, icon="🚨")

    # Persist the assistant reply (st.write_stream may return a str or a list).
    if isinstance(full_response, str):
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response}
        )
    elif full_response is not None:
        # Handle the case where full_response is a list of chunks
        combined_response = "\n".join(str(item) for item in full_response)
        st.session_state.messages.append(
            {"role": "assistant", "content": combined_response}
        )