Update app.py
app.py CHANGED
@@ -136,10 +136,10 @@ async def text_mode():
         st.session_state["openai_model"] = "gpt-3.5-turbo-16k"
     if radio_1 == 'GPT-3.5':
         # print('----------'*5)
-        print('radio_1: GPT-3.5 starts!')
+        # print('radio_1: GPT-3.5 starts!')
         st.session_state["openai_model"] = "gpt-3.5-turbo-16k"
     else:
-        print('radio_1: GPT-4.0 starts!')
+        # print('radio_1: GPT-4.0 starts!')
         st.session_state["openai_model"] = "gpt-4"
 
     # Initialize chat history
@@ -154,8 +154,8 @@ async def text_mode():
     # Display assistant response in chat message container
     # if prompt := st.chat_input("Say something"):
     prompt = st.chat_input("Say something")
-    print('prompt now:', prompt)
-    print('----------'*5)
+    # print('prompt now:', prompt)
+    # print('----------'*5)
     # if prompt:
     if prompt:
         st.session_state.messages.append({"role": "user", "content": prompt})
@@ -167,7 +167,7 @@ async def text_mode():
             full_response = ""
 
             if radio_2 == '联网模式':
-                print('联网模式入口,prompt:', prompt)
+                # print('联网模式入口,prompt:', prompt)
                 input_message = prompt
                 internet_search_result = search(input_message)
                 search_prompt = [
@@ -197,8 +197,8 @@ async def text_mode():
                 st.session_state.messages = []
 
             if radio_2 == '核心模式':
-                print('GPT only starts!!!')
-                print('messages:', st.session_state['messages'])
+                # print('GPT only starts!!!')
+                # print('messages:', st.session_state['messages'])
                 for response in openai.ChatCompletion.create(
                     model=st.session_state["openai_model"],
                     # messages=[
@@ -212,14 +212,14 @@ async def text_mode():
                     full_response += response.choices[0].delta.get(
                         "content", "")
                     message_placeholder.markdown(full_response + "▌")
-                print('session completed!')
+                # print('session completed!')
                 message_placeholder.markdown(full_response)
                 st.session_state.messages.append(
                     {"role": "assistant", "content": full_response})
 
 
 async def data_mode():
-    print('数据分析模式启动!')
+    # print('数据分析模式启动!')
     # uploaded_file_path = './upload.csv'
     uploaded_file_path = f'./{name}_upload.csv'
     # # st.write(f"passed file path in data_mode: {uploaded_file_path}")
@@ -238,8 +238,8 @@ async def data_mode():
     # Display assistant response in chat message container
     # if prompt := st.chat_input("Say something"):
     prompt = st.chat_input("Say something")
-    print('prompt now:', prompt)
-    print('----------'*5)
+    # print('prompt now:', prompt)
+    # print('----------'*5)
     # if prompt:
     if prompt:
         st.session_state.messages.append({"role": "user", "content": prompt})
@@ -269,7 +269,7 @@ async def data_mode():
             user_request = environ_settings + "\n\n" + \
                 "你需要完成以下任务:\n\n" + prompt + "\n\n" \
                 f"注:文件位置在{uploaded_file_path}"
-            print('user_request: \n', user_request)
+            # print('user_request: \n', user_request)
 
             # 加载上传的文件,主要路径在上面代码中。
             files = [File.from_path(str(uploaded_file_path))]
@@ -281,7 +281,7 @@ async def data_mode():
             )
 
             # output to the user
-            print("AI: ", response.content)
+            # print("AI: ", response.content)
             full_response = response.content
             ### full_response = "this is full response"
 
@@ -473,11 +473,11 @@ if __name__ == "__main__":
     import asyncio
     try:
         if radio_2 == "核心模式":
-            print(f'radio 选择了 {radio_2}')
+            # print(f'radio 选择了 {radio_2}')
             # * 也可以用命令执行这个python文件。’streamlit run frontend/app.py‘
             asyncio.run(text_mode())
         if radio_2 == "联网模式":
-            print(f'radio 选择了 {radio_2}')
+            # print(f'radio 选择了 {radio_2}')
             # * 也可以用命令执行这个python文件。’streamlit run frontend/app.py‘
             asyncio.run(text_mode())
         if radio_2 == "数据模式":