Update app.py
Browse files
app.py
CHANGED
@@ -1,12 +1,16 @@
|
|
1 |
import streamlit as st
|
2 |
import google.generativeai as genai
|
3 |
|
4 |
-
# Configure the Gemini API
|
5 |
-
|
|
|
|
|
|
|
|
|
6 |
|
7 |
# Create the model with system instructions
|
8 |
generation_config = {
|
9 |
-
"temperature":
|
10 |
"top_p": 0.95,
|
11 |
"top_k": 64,
|
12 |
"max_output_tokens": 8192,
|
@@ -20,8 +24,11 @@ model = genai.GenerativeModel(
|
|
20 |
chat_session = model.start_chat(history=[])
|
21 |
|
22 |
def generate_response(user_input):
|
23 |
-
|
24 |
-
|
|
|
|
|
|
|
25 |
|
26 |
# Streamlit UI setup
|
27 |
st.set_page_config(page_title="Sleek AI Code Assistant", page_icon="π", layout="wide")
|
|
|
1 |
import streamlit as st
|
2 |
import google.generativeai as genai
|
3 |
|
4 |
+
# Configure the Gemini API with error handling
|
5 |
+
# Configure the Gemini API with error handling.
# The key comes from Streamlit's secrets store; if it is missing or the
# configure call fails for any reason, show the error and halt the script
# run so the rest of the app never executes with a broken client.
try:
    api_key = st.secrets["GOOGLE_API_KEY"]
    genai.configure(api_key=api_key)
except Exception as e:
    st.error(f"Failed to configure API: {e}")
    st.stop()
|
10 |
|
11 |
# Create the model with system instructions
|
12 |
generation_config = {
|
13 |
+
"temperature": 0.7, # Slightly lower temperature for more focused responses
|
14 |
"top_p": 0.95,
|
15 |
"top_k": 64,
|
16 |
"max_output_tokens": 8192,
|
|
|
24 |
# Open a fresh chat session with no prior conversation history.
# NOTE(review): `model` is constructed above (outside this visible hunk) — confirm it exists before this line.
chat_session = model.start_chat(history=[])
|
25 |
|
26 |
def generate_response(user_input):
    """Send *user_input* to the module-level chat session and return the reply text.

    Best-effort by design: any failure (network, auth, API error) is folded
    into the returned string so the Streamlit UI displays the message
    instead of crashing the script run.
    """
    try:
        reply = chat_session.send_message(user_input)
    except Exception as e:  # broad on purpose — surface every API error inline
        return f"An error occurred while generating response: {e}"
    return reply.text
|
32 |
|
33 |
# Streamlit UI setup
|
34 |
# Must be the first Streamlit call in the script: sets the browser tab
# title/icon and switches to the full-width page layout.
st.set_page_config(page_title="Sleek AI Code Assistant", page_icon="π", layout="wide")
|