Update app.py
app.py CHANGED
@@ -5,7 +5,7 @@ from typing import Generator
 from groq import Groq
 
 _ = load_dotenv(find_dotenv())
-st.set_page_config(page_icon="💬", layout="wide", page_title="Groq
+st.set_page_config(page_icon="💬", layout="wide", page_title="Groq & LLaMA3...")
 
 
 def icon(emoji: str):
@@ -18,7 +18,31 @@ def icon(emoji: str):
 
 icon("📣")
 
-
+# Header layout with title and model/token components on the same line
+header_col1, header_col2 = st.columns([2, 1])
+
+with header_col1:
+    st.subheader("Groq & LLaMA3 Chat")
+
+with header_col2:
+    model_option = st.selectbox(
+        "Choose a model:",
+        options=list(models.keys()),
+        format_func=lambda x: models[x]["name"],
+        index=0,  # Default to the first model in the list
+    )
+    max_tokens_range = models[model_option]["tokens"]
+    max_tokens = st.slider(
+        "Max Tokens:",
+        min_value=512,
+        max_value=max_tokens_range,
+        value=min(32768, max_tokens_range),
+        step=512,
+        help=f"Adjust the maximum number of tokens (words) for the model's response. Max for selected model: {max_tokens_range}",
+    )
+
+# Place separator bar after header
+st.markdown("---")
 
 client = Groq(
     api_key=os.environ['GROQ_API_KEY'],
@@ -44,27 +68,6 @@ models = {
     },
 }
 
-# Layout for model selection and max_tokens slider
-col1, col2 = st.columns([1, 3])  # Adjust the ratio to make the first column smaller
-
-
-with col1:
-    model_option = st.selectbox(
-        "Choose a model:",
-        options=list(models.keys()),
-        format_func=lambda x: models[x]["name"],
-        index=0,  # Default to the first model in the list
-    )
-max_tokens_range = models[model_option]["tokens"]
-max_tokens = st.slider(
-    "Max Tokens:",
-    min_value=512,
-    max_value=max_tokens_range,
-    value=min(32768, max_tokens_range),
-    step=512,
-    help=f"Adjust the maximum number of tokens (words) for the model's response. Max for selected model: {max_tokens_range}",
-)
-
 # Detect model change and clear chat history if model has changed
 if st.session_state.selected_model != model_option:
     st.session_state.messages = []
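For context, here is a minimal runnable sketch of the header pattern this commit introduces: title in the wide left column, model picker and token slider in the narrow right column, via `st.columns([2, 1])`. The two-entry `models` dict is a stand-in, since the diff only shows the real table's closing braces. Note that, as the hunks read, the added header block references `models` before the `models = {` definition that hunk 3's context places later in the file, so a standalone script has to define the dict first, as below.

```python
import streamlit as st

# Stand-in for the app's real model table (only its closing braces appear in
# the diff); keys are model ids, values carry a display name and a token limit.
models = {
    "llama3-8b-8192": {"name": "LLaMA3-8b-8192", "tokens": 8192},
    "llama3-70b-8192": {"name": "LLaMA3-70b-8192", "tokens": 8192},
}

# 2:1 width ratio: the title gets the wide column, the controls the narrow one.
header_col1, header_col2 = st.columns([2, 1])

with header_col1:
    st.subheader("Groq & LLaMA3 Chat")

with header_col2:
    model_option = st.selectbox(
        "Choose a model:",
        options=list(models.keys()),
        format_func=lambda x: models[x]["name"],  # show the friendly name
        index=0,
    )
    max_tokens_range = models[model_option]["tokens"]
    max_tokens = st.slider(
        "Max Tokens:",
        min_value=512,
        max_value=max_tokens_range,
        value=min(32768, max_tokens_range),  # clamp the default to the model's limit
        step=512,
    )

st.markdown("---")  # separator bar under the header
```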
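The hunk-3 context also shows the model-change check that survives the commit unchanged. The diff never shows where `st.session_state.selected_model` is first set, so the initialization below is an assumption about the surrounding app, not part of this commit; without some such guard the comparison would raise on a fresh session, because Streamlit's `session_state` starts empty.

```python
import streamlit as st

# Seed both keys before the comparison can run (assumed; the diff does not
# show this initialization anywhere).
if "selected_model" not in st.session_state:
    st.session_state.selected_model = None
if "messages" not in st.session_state:
    st.session_state.messages = []

model_option = "llama3-8b-8192"  # stand-in for the selectbox value

# Detect model change and clear chat history if model has changed,
# then remember the current choice for the next rerun.
if st.session_state.selected_model != model_option:
    st.session_state.messages = []
    st.session_state.selected_model = model_option
```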
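Finally, the unchanged context lines show the key handling: `load_dotenv(find_dotenv())` populates `os.environ` from a `.env` file before `Groq(...)` reads `GROQ_API_KEY`. A self-contained version of just that wiring (the packages are the usual `groq` and `python-dotenv` distributions):

```python
import os

from dotenv import find_dotenv, load_dotenv
from groq import Groq

# find_dotenv() walks up from the working directory to locate a .env file;
# load_dotenv() then merges its entries into os.environ.
_ = load_dotenv(find_dotenv())

# os.environ[...] raises KeyError if GROQ_API_KEY is absent, which fails
# fast on a misconfigured deployment.
client = Groq(api_key=os.environ["GROQ_API_KEY"])
```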