EinsteinCoder committed on
Commit
5add07b
·
verified ·
1 Parent(s): f5c8af2

Create chat-app

Browse files
Files changed (1) hide show
  1. src/chat-app +117 -0
src/chat-app ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import requests
import json
import streamlit as st
import os
from dotenv import load_dotenv

# Load credentials from a local .env file into the process environment
# before any of them are read below.
load_dotenv()

# Salesforce connected-app credentials and org base URL.
# All three must be set in the environment (or .env) for OAuth to work;
# os.getenv returns None for any that are missing.
client_id = os.getenv("CLIENT_ID")
client_secret = os.getenv("CLIENT_SECRET")
base_url = os.getenv("BASE_URL")
def get_access_token():
    """Fetch an OAuth2 access token from Salesforce via the client-credentials flow.

    Reads ``base_url``, ``client_id`` and ``client_secret`` from module
    globals (loaded from the environment at import time).

    Returns:
        The access-token string, or ``None`` when the request fails or the
        response carries no token. Both failure paths surface the error in
        the Streamlit UI via ``st.error``.
    """
    url = base_url + "/services/oauth2/token"
    payload = {
        "grant_type": "client_credentials",
        "client_id": client_id,
        "client_secret": client_secret
    }
    # Bound the request so a hung auth server cannot freeze the app forever.
    response = requests.post(url, data=payload, timeout=30)
    if response.status_code != 200:
        st.error(f"Error fetching access token: {response.status_code} - {response.text}")
        return None
    data = response.json()
    access_token = data.get("access_token")
    if access_token is None:
        # Previously this fell through with the literal string
        # 'Token not found', which was later sent as a bogus Bearer token.
        # Fail loudly and return None, consistent with the non-200 path.
        st.error("Error fetching access token: no 'access_token' in response")
        return None
    return access_token
+
28
+ # Add model selection dictionary
29
+ MODEL_OPTIONS = {
30
+ "GPT4Omni": "sfdc_ai__DefaultOpenAIGPT4Omni",
31
+ "Gemini": "sfdc_ai__DefaultVertexAIGemini20Flash001",
32
+ "Claude": "sfdc_ai__DefaultBedrockAnthropicClaude37Sonnet"
33
+ }
34
+
35
+ # Configure Streamlit page settings
36
+ st.set_page_config(
37
+ page_title="Chat with Einstein LLMs!",
38
+ page_icon=":brain:", # Favicon emoji
39
+ layout="wide", # Page layout option
40
+ )
41
+
42
+ # Add sidebar with model selection
43
+ with st.sidebar:
44
+ st.title("Model Settings")
45
+ selected_model_name = st.selectbox(
46
+ "Choose AI Model",
47
+ options=list(MODEL_OPTIONS.keys()),
48
+ index=0
49
+ )
50
+ model = MODEL_OPTIONS[selected_model_name]
51
+
52
+ # Update the page title to reflect selected model
53
+ st.subheader(f"🤖 Chat with {selected_model_name}")
54
+
55
def get_gpt_response(prompt):
    """Send the chat history to the selected Einstein model and return its reply.

    Parameters:
        prompt: list of message dicts. Only the ``"role"`` and ``"content"``
            keys are forwarded — session messages may also carry an
            ``"image"`` entry (a Streamlit UploadedFile) that is not JSON
            serializable and previously crashed serialization with an
            uncaught TypeError.

    Returns:
        The generated text, or an apology string if the API call or
        response parsing fails (the error is also shown via ``st.error``).
    """
    url = f"https://api.salesforce.com/einstein/platform/v1/models/{model}/chat-generations"
    access_token = get_access_token()
    headers = {
        "Authorization": f"Bearer {access_token}",
        "Content-Type": "application/json;charset=utf-8",
        'x-sfdc-app-context': 'EinsteinGPT',
        'x-client-feature-id': 'ai-platform-models-connected-app'
    }
    # Strip non-serializable fields (e.g. uploaded-file objects) before sending.
    chat_payload = {
        "messages": [{"role": m["role"], "content": m["content"]} for m in prompt]
    }

    try:
        # json= lets requests serialize the payload; equivalent to
        # data=json.dumps(...) with the Content-Type set above.
        response = requests.post(url, headers=headers, json=chat_payload, timeout=60)
        response.raise_for_status()  # Raise exception for bad status codes
        data = response.json()
        return data["generationDetails"]["generations"][0]["content"]
    except requests.exceptions.RequestException as e:
        st.error(f"Error calling the API: {str(e)}")
        return "I apologize, but I encountered an error. Please try again."
    except (KeyError, IndexError) as e:
        st.error(f"Error parsing response: {str(e)}")
        return "I apologize, but I received an invalid response. Please try again."
+
81
# Seed the conversation with a greeting on first run; Streamlit re-executes
# this script on every interaction, so the init must be guarded.
if "messages" not in st.session_state:
    st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]

# Replay the full chat history on each rerun, including any stored images.
# NOTE(review): the robot avatar is applied to every role here, not just the
# assistant — confirm whether that is intentional.
for msg in st.session_state.messages:
    st.chat_message(msg["role"], avatar="🤖").write(msg["content"])
    if "image" in msg:
        st.image(msg["image"])

# Chat box that accepts text and/or an image attachment.
# NOTE(review): with accept_file=True, st.chat_input returns an object read
# below via both attribute access (.text) and dict-style access
# (.get("files") / ["files"]) — confirm against the installed Streamlit version.
prompt = st.chat_input(
    "Say something and/or attach an image",
    accept_file=True,
    file_type=["jpg", "jpeg", "png"],
)

if prompt:
    # Handle text input: record it in the history and echo it immediately.
    if prompt.text:
        st.session_state.messages.append({"role": "user", "content": prompt.text})
        st.chat_message("user").write(prompt.text)

    # Handle image upload (only the first attached file is used).
    if prompt.get("files"):
        uploaded_file = prompt["files"][0]
        st.session_state.messages.append({
            "role": "user",
            "content": "Uploaded an image",
            "image": uploaded_file
        })
        st.chat_message("user").write("Uploaded an image")
        st.image(uploaded_file)

    # Get the AI response if there was any input, append it to the history,
    # and render it in the assistant bubble.
    if prompt.text or prompt.get("files"):
        msg = get_gpt_response(st.session_state.messages)
        st.session_state.messages.append({"role": "assistant", "content": msg})
        st.chat_message("assistant", avatar="🤖").write(msg)