lx160cm commited on
Commit
b45aed6
·
verified ·
1 Parent(s): a614663

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +141 -141
app.py CHANGED
"""Streamlit chat UI for Google's Gemini model with on-disk chat history.

Each chat session is keyed by a timestamp id. The Streamlit-facing message
list and the raw Gemini history are pickled (via joblib) under ``data/`` so
past conversations can be listed in the sidebar and resumed across reruns.
"""
import os
import time

import joblib
import streamlit as st
import google.generativeai as genai
from dotenv import load_dotenv

# Read GOOGLE_API_KEY from the environment (optionally populated from a .env
# file) and configure the Gemini client with it.
load_dotenv()
GOOGLE_API_KEY = os.environ.get('GOOGLE_API_KEY')
genai.configure(api_key=GOOGLE_API_KEY)

# Candidate id for a brand-new chat; existing chats keep their original id.
new_chat_id = f'{time.time()}'
MODEL_ROLE = 'ai'
AI_AVATAR_ICON = '✨'

# Ensure the data/ folder exists (idempotent; replaces try/os.mkdir/except).
os.makedirs('data/', exist_ok=True)

# Load the {chat_id: title} index of past chats, if one has been saved.
try:
    past_chats: dict = joblib.load('data/past_chats_list')
except Exception:  # first run: no index file on disk yet
    past_chats = {}

# Sidebar allows a list of past chats
with st.sidebar:
    st.write('# Past Chats')
    if st.session_state.get('chat_id') is None:
        st.session_state.chat_id = st.selectbox(
            label='Pick a past chat',
            options=[new_chat_id] + list(past_chats.keys()),
            format_func=lambda x: past_chats.get(x, 'New Chat'),
            placeholder='_',
        )
    else:
        # This will happen the first time AI response comes in; keep the
        # currently-selected chat pre-selected (index=1) across the rerun.
        st.session_state.chat_id = st.selectbox(
            label='Pick a past chat',
            options=[new_chat_id, st.session_state.chat_id] + list(past_chats.keys()),
            index=1,
            format_func=lambda x: past_chats.get(x, 'New Chat' if x != st.session_state.chat_id else st.session_state.chat_title),
            placeholder='_',
        )
    # Save new chats after a message has been sent to AI
    # TODO: Give user a chance to name chat
    st.session_state.chat_title = f'ChatSession-{st.session_state.chat_id}'

st.write('# Chat with our Opensource LLM')

# Restore this chat's cached history; fall back to a fresh session when no
# cache files exist for this chat id (or they fail to unpickle).
try:
    st.session_state.messages = joblib.load(
        f'data/{st.session_state.chat_id}-st_messages'
    )
    st.session_state.gemini_history = joblib.load(
        f'data/{st.session_state.chat_id}-Alex_messages'
    )
    print('old cache')
except Exception:  # no cached history for this chat id yet
    st.session_state.messages = []
    st.session_state.gemini_history = []
    print('new_cache made')
st.session_state.model = genai.GenerativeModel('gemini-pro')
st.session_state.chat = st.session_state.model.start_chat(
    history=st.session_state.gemini_history,
)

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(
        name=message['role'],
        avatar=message.get('avatar'),
    ):
        st.markdown(message['content'])

# React to user input
if prompt := st.chat_input('Your message here...'):
    # Save this as a chat for later
    if st.session_state.chat_id not in past_chats.keys():
        past_chats[st.session_state.chat_id] = st.session_state.chat_title
        joblib.dump(past_chats, 'data/past_chats_list')
    # Display user message in chat message container
    with st.chat_message('user'):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append(
        dict(
            role='user',
            content=prompt,
        )
    )
    ## Send message to AI
    response = st.session_state.chat.send_message(
        prompt,
        stream=True,
    )
    # Display assistant response in chat message container
    with st.chat_message(
        name=MODEL_ROLE,
        avatar=AI_AVATAR_ICON,
    ):
        message_placeholder = st.empty()
        full_response = ''
        # Streams in a chunk at a time
        for chunk in response:
            # Simulate stream of chunk
            # TODO: Chunk missing `text` if API stops mid-stream ("safety"?)
            for ch in chunk.text.split(' '):
                full_response += ch + ' '
                time.sleep(0.05)
                # Rewrites with a cursor at end
                message_placeholder.write(full_response + '▌')
        # Write full message with placeholder
        message_placeholder.write(full_response)

    # Add assistant response to chat history
    st.session_state.messages.append(
        dict(
            role=MODEL_ROLE,
            content=st.session_state.chat.history[-1].parts[0].text,
            avatar=AI_AVATAR_ICON,
        )
    )
    st.session_state.gemini_history = st.session_state.chat.history
    # Save to file
    joblib.dump(
        st.session_state.messages,
        f'data/{st.session_state.chat_id}-st_messages',
    )
    joblib.dump(
        st.session_state.gemini_history,
        f'data/{st.session_state.chat_id}-Alex_messages',
    )

footer_html = """<div style='text-align: center;'>
  <p>Developed by Ramu Maloth</p>
</div>"""
st.markdown(footer_html, unsafe_allow_html=True)