Update
app.py
CHANGED
@@ -1,10 +1,9 @@
 import streamlit as st
-import …
+from openai import OpenAI
 from dotenv import load_dotenv
 import os
 import uuid
 from datetime import datetime
-import openai
 import time
 import random
 
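For orientation, the rest of this diff adopts the openai 1.x client pattern sketched below; the model name and prompt are placeholders rather than values taken from app.py.

from openai import OpenAI

client = OpenAI(api_key="sk-placeholder")  # the app reads the real key from the environment

# client.chat.completions.create replaces the 0.x openai.ChatCompletion.create call
response = client.chat.completions.create(
    model="gpt-3.5-turbo",  # placeholder model name
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)  # the reply text now lives on .choices[0].message.content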
@@ -35,8 +34,13 @@ if "selected_model" not in st.session_state:
 
 # Get OpenAI API key from environment or user input
 openai_api_key = os.getenv("OPENAI_API_KEY_NEW_3")
+if not openai_api_key:
+    openai_api_key = st.sidebar.text_input("Enter OpenAI API Key", type="password")
+
+# Initialize OpenAI client
+client = None
 if openai_api_key:
-    …
+    client = OpenAI(api_key=openai_api_key)
 
 # Available models with descriptions and token limits
 AVAILABLE_MODELS = {
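The new block builds the client only when a key is present, but it never verifies the key itself, so a bad key only surfaces on the first chat call. One way to fail fast would be a helper along these lines; the make_client name is ad hoc, not from app.py, and this is a sketch rather than part of the commit.

import openai
from openai import OpenAI

def make_client(api_key):
    """Return an OpenAI client, or None if the key is missing or rejected."""
    if not api_key:
        return None
    client = OpenAI(api_key=api_key)
    try:
        client.models.list()  # lightweight request that fails fast on a bad key
    except openai.AuthenticationError:
        return None
    return client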
@@ -110,7 +114,7 @@ AVAILABLE_MODELS = {
 
 # Function to call OpenAI API
 def get_ai_response(prompt, history):
-    if not openai_api_key:
+    if not client:
         return get_demo_response(prompt)
 
     try:
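get_demo_response is defined elsewhere in app.py and is untouched by this commit; the guard above simply routes to it whenever no client could be built. A purely hypothetical stand-in, using the time and random imports the file already has, could look like this:

import random
import time

def get_demo_response(prompt):
    """Hypothetical offline fallback: returns a canned reply when no API key/client is available."""
    time.sleep(random.uniform(0.3, 0.8))  # simulate a little latency
    canned = [
        "This is a demo response; add an OpenAI API key to get real answers.",
        f"(demo) You asked: {prompt[:80]}",
    ]
    return random.choice(canned)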
@@ -124,27 +128,17 @@
         model = st.session_state.selected_model
         model_config = AVAILABLE_MODELS.get(model, AVAILABLE_MODELS["gpt-3.5-turbo"])
 
-        response = openai.ChatCompletion.create(
+        response = client.chat.completions.create(
             model=model,
             messages=messages,
             temperature=model_config["temperature"],
             max_tokens=model_config["output_tokens"],
             stream=False
         )
-        return response…
+        return response.choices[0].message.content
 
-    except openai.error.AuthenticationError as auth_err:
-        return f"Authentication error: {str(auth_err)}. Check your API key."
-    except openai.error.InvalidRequestError as inv_err:
-        return f"Invalid request: {str(inv_err)}. The model '{model}' might not be available with your API key."
-    except openai.error.APIConnectionError as conn_err:
-        return f"API connection error: {str(conn_err)}. Please check your network and try again."
-    except openai.error.RateLimitError as rate_err:
-        return f"Rate limit exceeded: {str(rate_err)}. Please wait before trying again."
-    except openai.error.OpenAIError as openai_err:
-        return f"OpenAI error: {str(openai_err)}. An unexpected error occurred."
     except Exception as e:
-        return f"An …
+        return f"An error occurred: {str(e)}."
 
 # Function to create a new conversation
 def create_new_chat():
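The commit also collapses the granular openai.error.* handlers into a single except Exception, which is unavoidable since the openai.error module no longer exists in the 1.x SDK. The package still exposes comparable exception classes at the top level, with BadRequestError as the 1.x name for InvalidRequestError, so the per-error messages could be kept. A sketch of one way to restore them; the describe_openai_error helper is ad hoc, not from app.py:

import openai

def describe_openai_error(err):
    """Map 1.x SDK exceptions back to the user-facing messages the old handlers produced."""
    if isinstance(err, openai.AuthenticationError):
        return f"Authentication error: {err}. Check your API key."
    if isinstance(err, openai.BadRequestError):  # 1.x replacement for InvalidRequestError
        return f"Invalid request: {err}. The selected model might not be available with your API key."
    if isinstance(err, openai.APIConnectionError):
        return f"API connection error: {err}. Please check your network and try again."
    if isinstance(err, openai.RateLimitError):
        return f"Rate limit exceeded: {err}. Please wait before trying again."
    if isinstance(err, openai.OpenAIError):
        return f"OpenAI error: {err}. An unexpected error occurred."
    return f"An error occurred: {err}."

Inside get_ai_response, the existing except Exception as e: branch could then simply return describe_openai_error(e).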