Benjamin Consolvo committed on
Commit
0dc0782
·
1 Parent(s): c274166

OpenAI key on sidebar

Browse files
Files changed (2) hide show
  1. .gitignore +1 -1
  2. app.py +76 -75
.gitignore CHANGED
@@ -1 +1 @@
1
- app_v2.py
 
1
+ app_backup.py
app.py CHANGED
@@ -1,75 +1,76 @@
1
- # ©2024 Intel Corporation
2
- # Permission is granted for recipient to internally use and modify this software for purposes of benchmarking and testing on Intel architectures.
3
- # This software is provided "AS IS" possibly with faults, bugs or errors; it is not intended for production use, and recipient uses this design at their own risk with no liability to Intel.
4
- # Intel disclaims all warranties, express or implied, including warranties of merchantability, fitness for a particular purpose, and non-infringement.
5
- # Recipient agrees that any feedback it provides to Intel about this software is licensed to Intel for any purpose worldwide. No permission is granted to use Intel’s trademarks.
6
- # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the code.
7
-
8
- # Import necessary libraries
9
-
10
- import streamlit as st
11
- import os
12
- from openai import OpenAI
13
- import json
14
-
15
-
16
-
17
- working_dir = os.path.dirname(os.path.abspath(__file__))
18
- endpoint_data = json.load(open(f"{working_dir}/model_info.json"))
19
-
20
- def clear_chat():
21
- st.session_state.messages = []
22
-
23
- st.title("Intel® AI for Enterprise Inference - Chatbot")
24
-
25
- # Extract the keys (model names) from the JSON data
26
- model_names = list(endpoint_data.keys())
27
-
28
- with st.sidebar:
29
- modelname = st.selectbox("Select a LLM model (Hosted by DENVR DATAWORKS) ", model_names)
30
- st.write(f"You selected: {modelname}")
31
- st.button("Start New Chat", on_click=clear_chat)
32
-
33
- endpoint = endpoint_data[modelname]
34
-
35
- # api_key=os.environ.get('API_KEY')
36
- # api_key = st.secrets["openai_apikey"]
37
- api_key = None
38
-
39
- if not api_key:
40
- st.info("Please add your OpenAI API key to continue.")
41
- st.stop()
42
- base_url = endpoint
43
- client = OpenAI(api_key=api_key, base_url=base_url)
44
-
45
- # Extract the model name
46
- models = client.models.list()
47
- modelname = models.data[0].id
48
-
49
-
50
- if "messages" not in st.session_state:
51
- st.session_state.messages = []
52
-
53
- for message in st.session_state.messages:
54
- with st.chat_message(message["role"]):
55
- st.markdown(message["content"])
56
-
57
- if prompt := st.chat_input("What is up?"):
58
- st.session_state.messages.append({"role": "user", "content": prompt})
59
- with st.chat_message("user"):
60
- st.markdown(prompt)
61
-
62
- with st.chat_message("assistant"):
63
- stream = client.chat.completions.create(
64
- model=modelname,
65
- messages=[
66
- {"role": m["role"], "content": m["content"]}
67
- for m in st.session_state.messages
68
- ],
69
- max_tokens=5000,
70
- stream=True,
71
- )
72
- response = st.write_stream(stream)
73
- st.session_state.messages.append({"role": "assistant", "content": response})
74
-
75
-
 
 
1
+ # Import necessary libraries
2
+
3
+ import streamlit as st
4
+ import os
5
+ from openai import OpenAI
6
+ import json
7
+
8
+ working_dir = os.path.dirname(os.path.abspath(__file__))
9
+ endpoint_data = json.load(open(f"{working_dir}/model_info.json"))
10
+
11
+ def clear_chat():
12
+ st.session_state.messages = []
13
+
14
+ st.title("Intel® AI for Enterprise Inference \n Chatbot")
15
+
16
+ # Extract the keys (model names) from the JSON data
17
+ model_names = list(endpoint_data.keys())
18
+
19
+ with st.sidebar:
20
+ modelname = st.selectbox("Select a LLM model (Running on Intel® Gaudi®) ", model_names)
21
+ st.write(f"You selected: {modelname}")
22
+ st.button("Start New Chat", on_click=clear_chat)
23
+
24
+ # Add a text input for the API key
25
+ api_key = st.text_input("Enter your API Key", type="password")
26
+ if api_key:
27
+ st.session_state.api_key = api_key
28
+
29
+ # Check if the API key is provided
30
+ if "api_key" not in st.session_state or not st.session_state.api_key:
31
+ st.error("Please enter your API Key in the sidebar.")
32
+ else:
33
+ try:
34
+ endpoint = endpoint_data[modelname]
35
+
36
+ api_key = st.session_state.api_key
37
+ base_url = endpoint
38
+ client = OpenAI(api_key=api_key, base_url=base_url)
39
+
40
+ # Extract the model name
41
+ models = client.models.list()
42
+ modelname = models.data[0].id
43
+
44
+ if "messages" not in st.session_state:
45
+ st.session_state.messages = []
46
+
47
+ for message in st.session_state.messages:
48
+ with st.chat_message(message["role"]):
49
+ st.markdown(message["content"])
50
+
51
+ if prompt := st.chat_input("What is up?"):
52
+ st.session_state.messages.append({"role": "user", "content": prompt})
53
+ with st.chat_message("user"):
54
+ st.markdown(prompt)
55
+
56
+ with st.chat_message("assistant"):
57
+ try:
58
+ stream = client.chat.completions.create(
59
+ model=modelname,
60
+ messages=[
61
+ {"role": m["role"], "content": m["content"]}
62
+ for m in st.session_state.messages
63
+ ],
64
+ max_tokens=1024,
65
+ stream=True,
66
+ )
67
+ response = st.write_stream(stream)
68
+ except Exception as e:
69
+ st.error(f"An error occurred while generating the response: {e}")
70
+ response = "An error occurred while generating the response."
71
+
72
+ st.session_state.messages.append({"role": "assistant", "content": response})
73
+ except KeyError as e:
74
+ st.error(f"Key error: {e}")
75
+ except Exception as e:
76
+ st.error(f"An unexpected error occurred: {e}")