Upload app.py
app.py CHANGED
@@ -17,7 +17,7 @@
     credentials["usernames"].update({un: user_dict})
 
 '''
-# TODO:
+# TODO:2. account system. 3. local enterprise database.
 
 import database as db
 from deta import Deta # pip3 install deta
@@ -178,12 +178,12 @@ async def text_mode():
     # Set a default model
     if "openai_model" not in st.session_state:
         st.session_state["openai_model"] = "gpt-3.5-turbo-16k"
-    if radio_1 == '
+    if radio_1 == '模型A':
         # print('----------'*5)
-        print('radio_1:
+        print('radio_1: 模型A starts!')
         st.session_state["openai_model"] = "gpt-3.5-turbo-16k"
     else:
-        print('radio_1:
+        print('radio_1: 模型B starts!')
         st.session_state["openai_model"] = "gpt-4-1106-preview"
 
     # Initialize chat history
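Note: the new branch above ties the sidebar radio labels to concrete OpenAI model names with an if/else. An equivalent, slightly more compact pattern is a dict lookup; the sketch below is illustrative only (`MODEL_BY_LABEL` and `pick_model` are hypothetical names, not part of this commit):

```python
# Sketch: map the radio label chosen in the sidebar to an OpenAI model name.
# MODEL_BY_LABEL, pick_model and the fallback value are illustrative, not part of app.py.
MODEL_BY_LABEL = {
    '模型A': "gpt-3.5-turbo-16k",
    '模型B': "gpt-4-1106-preview",
}

def pick_model(radio_1: str) -> str:
    # Fall back to the GPT-3.5 model when the label is unknown.
    return MODEL_BY_LABEL.get(radio_1, "gpt-3.5-turbo-16k")

# st.session_state["openai_model"] = pick_model(radio_1)
```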
@@ -350,9 +350,10 @@ def localKB_mode(username):
     # st.session_state.messages.append(
     # {"role": "assistant", "content": full_response})
 
-async def data_mode():
-
+# async def data_mode():
+def data_mode():
     clear_all() ## reset the conversation.
+    print('数据分析模式启动!')
     # uploaded_file_path = './upload.csv'
     uploaded_file_path = f'./{username}_upload.csv'
     # uploaded_file_path = "/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Coding/code_interpreter/test_upload.csv"
@@ -379,74 +380,82 @@ async def data_mode():
     print('----------'*5)
     # if prompt:
     if prompt:
-        st.
+        try:
+            st.session_state.messages.append({"role": "user", "content": prompt})
+            with st.chat_message("user"):
+                st.markdown(prompt)
 
-        async with CodeInterpreterSession() as session:
+            with st.chat_message("assistant"):
                 print('111')
+                llm_model = ChatOpenAI(model_name="gpt-4-1106-preview")
+                # async with CodeInterpreterSession(llm=llm_model) as session:
+                with CodeInterpreterSession(llm=llm_model) as session:
+                    # with CodeInterpreterSession(llm=llm_model) as session:
+                    print('222')
+                    # user_request = "对于文件中的'SepalLengthCm’数据给我一个'直方图',提供图表,并给出分析结果"
+                    #! 可以用设定dpi=300来输出高质量的图表。(注:图的解析度dpi设定为300)
+                    environ_settings = """【背景要求】如果我没有告诉你任何定制化的要求,那么请你按照以下的默认要求来回答:
+                    -------------------------------------------------------------------------
+                    1. 你需要用提问的语言来回答(如:中文提问你就用中文来回答,英文提问你就用英文来回答)。
+                    2. 如果要求你输出图表,那么图的解析度dpi需要设定为600。图尽量使用seaborn库。seaborn库的参数设定:sns.set(rc={'axes.facecolor':'#FFF9ED','figure.facecolor':'#FFF9ED'}, palette='dark'。
+                    3. 图上所有的文字全部翻译成<英文English>来表示。
+                    4. 你回答的文字内容必须尽可能的详细且通俗易懂。
+                    5. 回答时尽可能地展示分析所对应的图表,并提供分析结果。 你需要按如下格式提供内容:
+                    5.1 提供详细且专业的分析结果,提供足够的分析依据。
+                    5.2 给出可能造成这一结果的可能原因有哪些?
+                    以上内容全部用【1, 2, 3这样的序列号格式】来表达。
+                    -------------------------------------------------------------------------
+                    """ # seaborn中的palette参数可以设定图表的颜色,选项包括:deep, muted, pastel, bright, dark, colorblind,Spectral。更多参数可以参考:https://seaborn.pydata.org/generated/seaborn.color_palette.html。
+
+                    # uploaded_file_path = upload_file()
+
+                    user_request = environ_settings + "\n\n" + \
+                        "你需要完成以下任务:\n\n" + prompt + "\n\n" \
+                        f"注:文件位置在 {uploaded_file_path}"
+                    user_request = str(prompt)
+                    print('user_request: \n', user_request)
+
+                    # 加载上传的文件,主要路径在上面代码中。
+                    files = [File.from_path(str(uploaded_file_path))]
+                    # files = [File.from_path("/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Coding/code_interpreter/rawdata/short_csv.csv")]
+                    # st.write(pd.DataFrame(files))
+                    # print('session.__init__', session.__init__)
+                    # print('session', session.__init__)
+
+                    with st.status('Thinking...', expanded=True, state='running') as status:
+                        # generate the response
+                        # response = await session.generate_response(user_msg=user_request, files=files, detailed_error=True)
+                        # response = await session.generate_response(user_msg=user_request, files=files)
+                        response = session.generate_response(user_msg=user_request, files=files)
+
+                        # output to the user
+                        print("AI: ", response.content)
+                        full_response = response.content
+                        ### full_response = "this is full response"
+
+                        # for file in response.files:
+                        for i, file in enumerate(response.files):
+                            # await file.asave(f"/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Coding/code_interpreter/output{i}.png") ##working.
+                            # st.image(file.get_image()) #! working.
+                            # * 注意这里的设定,可以提高图片的精细程度。
+                            st.image(file.get_image(), width=None,
+                                     output_format='PNG')
+
+                        # message_placeholder.markdown(full_response + "▌") ## orignal code.
+                        # message_placeholder.markdown(full_response) ## orignal code.
+                        st.write(full_response)
+                        status.update(label='complete', state='complete')
+
+                        # TODO: 确认是否要记录所有的full response。
+                        st.session_state.messages.append(
+                            {"role": "assistant", "content": full_response})
+
+                        # await session.astop() # ! 确认需要关闭。
+                        session.astop() # ! 确认需要关闭。
+                        # st.session_state.messages.append({"role": "assistant", "content": full_response})
+        except Exception as e:
+            print(e)
+            pass
 
 ### authentication with a local yaml file.
 import yaml
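The bulk of this hunk swaps the old `async with CodeInterpreterSession() as session:` flow for the library's synchronous API, which is why `data_mode()` becomes a plain function and the `asyncio.run(...)` call disappears in the last hunk. A minimal, self-contained sketch of that synchronous pattern, using only the calls that appear in the diff; the file path, prompt, and imports are assumptions for illustration, not part of this commit:

```python
# Sketch of the synchronous codeinterpreterapi flow adopted in data_mode().
# Assumptions: `pip install codeinterpreterapi langchain`, OPENAI_API_KEY is set,
# and ./example_upload.csv is a placeholder for the per-user upload path.
from codeinterpreterapi import CodeInterpreterSession, File
from langchain.chat_models import ChatOpenAI  # import assumed to exist elsewhere in app.py

llm = ChatOpenAI(model_name="gpt-4-1106-preview")

# The `with` block manages the sandboxed session's lifecycle synchronously
# (the hunk above still calls session.astop() defensively on top of that).
with CodeInterpreterSession(llm=llm) as session:
    files = [File.from_path("./example_upload.csv")]  # placeholder path
    response = session.generate_response(
        user_msg="Plot a histogram of the first numeric column.",
        files=files,
    )
    print(response.content)       # text part of the answer
    for f in response.files:
        image = f.get_image()     # image object that app.py renders with st.image(...)
```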
@@ -531,8 +540,8 @@ if authentication_status:
         """
         <style>
         [data-testid="stSidebar"][aria-expanded="true"]{
-            min-width:
-            max-width:
+            min-width: 400px;
+            max-width: 400px;
         }
         """,
         unsafe_allow_html=True,
@@ -599,8 +608,8 @@ if authentication_status:
     col1, col2 = st.columns(spec=[1, 2])
     radio_2 = col2.radio(label='模式选择', options=[
         '核心模式', '联网模式', '知识库模式', '数据模式'], horizontal=True, label_visibility='visible')
-    radio_1 = col1.radio(label='
-        '
+    radio_1 = col1.radio(label='大模型选择', options=[
+        '模型A', '模型B'], horizontal=True, label_visibility='visible')
 
 elif authentication_status == False:
     st.error('⛔ 用户名或密码错误!')
@@ -711,7 +720,8 @@ if __name__ == "__main__":
         # 默认状态下没有上传文件,None,会报错。需要判断。
         if uploaded_file is not None:
             uploaded_file_path = upload_file(uploaded_file)
-            asyncio.run(data_mode())
+            # asyncio.run(data_mode())
+            data_mode()
     except:
         # st.markdown('**请先登录!**')
         pass