''' 参考: https://github.com/shroominic/codeinterpreter-api 1. 可以存在本地,然后再调出来。 working. 1. 可以在临时文件夹中读取文件。 1. 可以直接在内存中读出图片。 1. 中文字体成功。 from matplotlib.font_manager import FontProperties myfont=FontProperties(fname='/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Coding/code_interpreter/rawdata/SimHei.ttf') sns.set_style('whitegrid',{'font.sans-serif':['simhei','Arial']}) 1. 解决了account login的问题,主要格式: ## 需要严格的按照yaml文件的格式来定义如下几个字段。 for un, name, pw in zip(users, names, hashed_passwords): # user_dict = {"name":name,"password":pw} user_dict = {"name": un, "password": pw} # credentials["usernames"].update({un:user_dict}) credentials["usernames"].update({un: user_dict}) ''' # TODO:1. Chinese display isssue. 2. account system. 3. local enterprise database. import database as db from deta import Deta # pip3 install deta import requests from codeinterpreterapi import CodeInterpreterSession, File import streamlit as st # from codeinterpreterapi import CodeInterpreterSession import openai import os import matplotlib.pyplot as plt import pandas as pd # import csv import tempfile from tempfile import NamedTemporaryFile import pathlib from pathlib import Path from matplotlib.font_manager import FontProperties import seaborn as sns from time import sleep import streamlit_authenticator as stauth import database as db # python文件同目录下的.py程序,直接导入。 import deta from langchain.chat_models import ChatOpenAI from llama_index import StorageContext, load_index_from_storage, GPTVectorStoreIndex, LLMPredictor, PromptHelper from llama_index import ServiceContext, QuestionAnswerPrompt import sys import time import PyPDF2 ## read the local_KB PDF file. # import localKB_construct import save_database_info from datetime import datetime import pytz os.environ["OPENAI_API_KEY"] = os.environ['user_token'] openai.api_key = os.environ['user_token'] # os.environ["VERBOSE"] = "True" # 可以看到具体的错误? #* 如果碰到接口问题,可以启用如下设置。 # openai.proxy = { # "http": "http://127.0.0.1:7890", # "https": "http://127.0.0.1:7890" # } # layout settings. st.title("专业版大语言模型商业智能中心") st.subheader("Artificial Intelligence Backend Center for Professionals") # clear conversion. reset_button_key = "reset_button" reset_button = st.button(label=("扫清世间烦恼,清除所有记录,并开启一轮新对话 ▶"), key=reset_button_key, use_container_width=True, type="secondary") if reset_button: st.session_state.conversation = None st.session_state.chat_history = None st.session_state.messages = [] message_placeholder = st.empty() def clear_all(): st.session_state.conversation = None st.session_state.chat_history = None st.session_state.messages = [] message_placeholder = st.empty() return None # # with tab2: # def upload_file(uploaded_file): # if uploaded_file is not None: # filename = uploaded_file.name # # st.write(filename) # print out the whole file name to validate. not to show in the final version. # try: # if '.pdf' in filename: # # pdf_file = PyPDF2.PdfReader(uploaded_file) # PyPDF2.PdfReader(uploaded_file) # # st.write(pdf_file.pages[0].extract_text()) # # with st.status('正在为您解析新知识库...', expanded=False, state='running') as status: # spinner = st.spinner('正在为您解析新知识库...请耐心等待') # # with st.spinner('正在为您解析新知识库...请耐心等待'): # with spinner: # import localKB_construct # # sleep(3) # # st.write(upload_file) # localKB_construct.process_file(uploaded_file) # st.markdown('新知识库解析成功,可以开始对话!') # spinner = st.empty() # # sleep(3) # # display = [] # else: # if '.csv' in filename: # csv_file = pd.read_csv(uploaded_file) # csv_file.to_csv('./upload.csv', encoding='utf-8', index=False) # st.write(csv_file[:3]) # 这里只是显示文件,后面需要定位文件所在的绝对路径。 # else: # xls_file = pd.read_excel(uploaded_file) # xls_file.to_csv('./upload.csv', index=False) # st.write(xls_file[:3]) # uploaded_file_name = "File_provided" # temp_dir = tempfile.TemporaryDirectory() # # ! working. # uploaded_file_path = pathlib.Path(temp_dir.name) / uploaded_file_name # # with open('./upload.csv', 'wb') as output_temporary_file: # with open(f'./{name}_upload.csv', 'wb') as output_temporary_file: # # print(f'./{name}_upload.csv') # # ! 必须用这种格式读入内容,然后才可以写入temporary文件夹中。 # # output_temporary_file.write(uploaded_file.getvalue()) # output_temporary_file.write(uploaded_file.getvalue()) # # st.write(uploaded_file_path) #* 可以查看文件是否真实存在,然后是否可以 # # st.write('Now file saved successfully.') # except Exception as e: # st.write(e) # # uploaded_file_name = "File_provided" # # temp_dir = tempfile.TemporaryDirectory() # # # ! working. # # uploaded_file_path = pathlib.Path(temp_dir.name) / uploaded_file_name # # # with open('./upload.csv', 'wb') as output_temporary_file: # # with open(f'./{name}_upload.csv', 'wb') as output_temporary_file: # # # print(f'./{name}_upload.csv') # # # ! 必须用这种格式读入内容,然后才可以写入temporary文件夹中。 # # # output_temporary_file.write(uploaded_file.getvalue()) # # output_temporary_file.write(uploaded_file.getvalue()) # # # st.write(uploaded_file_path) # * 可以查看文件是否真实存在,然后是否可以 # # # st.write('Now file saved successfully.') # return None bing_search_api_key = os.environ['bing_api_key'] bing_search_endpoint = 'https://api.bing.microsoft.com/v7.0/search' def search(query): # Construct a request # mkt = 'en-EN' mkt = 'zh-CN' params = {'q': query, 'mkt': mkt} headers = {'Ocp-Apim-Subscription-Key': bing_search_api_key} # Call the API try: response = requests.get(bing_search_endpoint, headers=headers, params=params) response.raise_for_status() json = response.json() return json["webPages"]["value"] # print("\nJSON Response:\n") # pprint(response.json()) except Exception as e: raise e # openai.api_key = st.secrets["OPENAI_API_KEY"] async def text_mode(): # Set a default model if "openai_model" not in st.session_state: st.session_state["openai_model"] = "gpt-3.5-turbo-16k" if radio_1 == 'GPT-3.5': # print('----------'*5) print('radio_1: GPT-3.5 starts!') st.session_state["openai_model"] = "gpt-3.5-turbo-16k" else: print('radio_1: GPT-4.0 starts!') st.session_state["openai_model"] = "gpt-4" # Initialize chat history if "messages" not in st.session_state: st.session_state.messages = [] # Display chat messages from history on app rerun for message in st.session_state.messages: with st.chat_message(message["role"]): st.markdown(message["content"]) # Display assistant response in chat message container # if prompt := st.chat_input("Say something"): prompt = st.chat_input("Say something") print('prompt now:', prompt) print('----------'*5) # if prompt: if prompt: st.session_state.messages.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" if radio_2 == '联网模式': print('联网模式入口,prompt:', prompt) input_message = prompt internet_search_result = search(input_message) search_prompt = [ f"Source:\nTitle: {result['name']}\nURL: {result['url']}\nContent: {result['snippet']}" for result in internet_search_result] prompt = "基于如下的互联网公开信息, 回答问题:\n\n" + \ "\n\n".join(search_prompt[:3]) + "\n\n问题: " + input_message + \ "你需要注意的是回答问题时必须用提问的语言(如英文或者中文)来提示:'答案基于互联网公开信息。'" + "\n\n答案: " # 限制了只有3个搜索结果。 # prompt = "Use these sources to answer the question:\n\n" + "\n\n".join(search_prompt[0:3]) + "\n\nQuestion: " + input_message + "(注意:回答问题时请提示'以下答案基于互联网公开信息。')\n\n" + "\n\nAnswer: " st.session_state.messages.append( {"role": "user", "content": prompt}) for response in openai.ChatCompletion.create( model=st.session_state["openai_model"], messages=[ {"role": m["role"], "content": m["content"]} for m in st.session_state.messages ], stream=True, ): full_response += response.choices[0].delta.get( "content", "") message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(full_response) st.session_state.messages.append( {"role": "assistant", "content": full_response}) st.session_state.messages = [] if radio_2 == '核心模式': print('GPT only starts!!!') print('messages:', st.session_state['messages']) for response in openai.ChatCompletion.create( model=st.session_state["openai_model"], messages=[ {"role": m["role"], "content": m["content"]} for m in st.session_state.messages ], stream=True, # messages=[{'role': 'system', 'content': 'you are ChatGPT'}, { # 'role': 'user', 'content': prompt}], # stream=True, ): full_response += response.choices[0].delta.get( "content", "") message_placeholder.markdown(full_response + "▌") # print('session completed!') message_placeholder.markdown(full_response) st.session_state.messages.append( {"role": "assistant", "content": full_response}) ## load the local_KB PDF file. # async def localKB_mode(): def localKB_mode(username): ### clear all the prior conversation. # st.session_state.conversation = None # st.session_state.chat_history = None # st.session_state.messages = [] # message_placeholder = st.empty() clear_all() ## reset the conversation. print('now starts the local KB version of ChatGPT') # Initialize chat history if "messages" not in st.session_state: st.session_state.messages = [] for message in st.session_state.messages: with st.chat_message(message["role"]): st.markdown(message["content"]) # Display assistant response in chat message container # if prompt := st.chat_input("Say something"): # prompt = st.chat_input("Say something") # print('prompt now:', prompt) # print('----------'*5) # if prompt: if prompt := st.chat_input("Say something"): st.session_state.messages.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.status('检索中...', expanded=True, state='running') as status: with st.chat_message("assistant"): message_placeholder = st.empty() full_response = "" # if radio_2 == "知识库模式": # ! 这里需要重新装载一下storage_context。 QA_PROMPT_TMPL = ( "We have provided context information below. \n" "---------------------\n" "{context_str}" "\n---------------------\n" "Given all this information, please answer the following questions," "You MUST use the SAME language as the question:\n" "{query_str}\n") QA_PROMPT = QuestionAnswerPrompt(QA_PROMPT_TMPL) # print('QA_PROMPT:', QA_PROMPT) # llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.8, model_name="gpt-3.5-turbo", max_tokens=4024,streaming=True)) # # print('llm_predictor:', llm_predictor) # prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit) # print('prompt_helper:', prompt_helper) # service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper) # print('service_context:', service_context) # # # index = load_index_from_storage(storage_context) # print("storage_context:", storage_context) # index = load_index_from_storage(storage_context,service_context=service_context) storage_context = StorageContext.from_defaults(persist_dir=f"./{username}/") print('storage_context:',storage_context) index = load_index_from_storage(storage_context) # query_engine = index.as_query_engine(streaming=True, similarity_top_k=3, text_qa_template=QA_PROMPT) query_engine = index.as_query_engine(streaming=True) # query_engine = index.as_query_engine(streaming=True, text_qa_template=QA_PROMPT) # query_engine = index.as_query_engine(streaming=False, text_qa_template=QA_PROMPT) # query_engine = index.as_query_engine() # reply = query_engine.query(prompt) llama_index_reply = query_engine.query(prompt) # full_response += query_engine.query(prompt) print('local KB reply:', llama_index_reply) # query_engine.query(prompt).print_response_stream() #* 能在terminal中流式输出。 # for resp in llama_index_reply.response_gen: # print(resp) # full_response += resp # message_placeholder.markdown(full_response + "▌") message_placeholder.markdown(llama_index_reply) # st.session_state.messages.append( # {"role": "assistant", "content": full_response}) # st.session_state.messages = [] # full_response += reply # full_response = reply # st.session_state.messages.append( # {"role": "assistant", "content": full_response}) async def data_mode(): print('数据分析模式启动!') clear_all() ## reset the conversation. # uploaded_file_path = './upload.csv' uploaded_file_path = f'./{username}_upload.csv' # uploaded_file_path = "/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Coding/code_interpreter/test_upload.csv" print('file path:', uploaded_file_path) # st.write(f"passed file path in data_mode: {uploaded_file_path}") # tmp1 = pd.read_csv(uploaded_file_path) # st.markdown('成功启动数据模式,以下是加载的文件内容') # st.write(tmp1[:5]) # Initialize chat history if "messages" not in st.session_state: st.session_state.messages = [] # Display chat messages from history on app rerun for message in st.session_state.messages: with st.chat_message(message["role"]): st.markdown(message["content"]) # Display assistant response in chat message container # if prompt := st.chat_input("Say something"): prompt = st.chat_input("Say something") print('prompt now:', prompt) print('----------'*5) # if prompt: if prompt: st.session_state.messages.append({"role": "user", "content": prompt}) with st.chat_message("user"): st.markdown(prompt) with st.chat_message("assistant"): async with CodeInterpreterSession() as session: # user_request = "对于文件中的'SepalLengthCm’数据给我一个'直方图',提供图表,并给出分析结果" #! 可以用设定dpi=300来输出高质量的图表。(注:图的解析度dpi设定为300) environ_settings = """【背景要求】如果我没有告诉你任何定制化的要求,那么请你按照以下的默认要求来回答: ------------------------------------------------------------------------- 1. 你需要用提问的语言来回答(如:中文提问你就用中文来回答,英文提问你就用英文来回答)。 2. 如果要求你输出图表,那么图的解析度dpi需要设定为600。图尽量使用seaborn库。seaborn库的参数设定:sns.set(rc={'axes.facecolor':'#FFF9ED','figure.facecolor':'#FFF9ED'}, palette='dark'。 3. 图上所有的文字全部翻译成<英文English>来表示。 4. 你回答的文字内容必须尽可能的详细且通俗易懂。 5. 回答时尽可能地展示分析所对应的图表,并提供分析结果。 你需要按如下格式提供内容: 5.1 提供详细且专业的分析结果,提供足够的分析依据。 5.2 给出可能造成这一结果的可能原因有哪些? 以上内容全部用【1/2/3这样的序列号格式】来表达。 ------------------------------------------------------------------------- """ # seaborn中的palette参数可以设定图表的颜色,选项包括:deep, muted, pastel, bright, dark, colorblind,Spectral。更多参数可以参考:https://seaborn.pydata.org/generated/seaborn.color_palette.html。 # uploaded_file_path = upload_file() user_request = environ_settings + "\n\n" + \ "你需要完成以下任务:\n\n" + prompt + "\n\n" \ f"注:文件位置在 {uploaded_file_path}" user_request = str(prompt) print('user_request: \n', user_request) # 加载上传的文件,主要路径在上面代码中。 files = [File.from_path(str(uploaded_file_path))] # files = [File.from_path("/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Coding/code_interpreter/rawdata/short_csv.csv")] # st.write(pd.DataFrame(files)) # print('session.__init__', session.__init__) # print('session', session.__init__) with st.status('Thinking...', expanded=True, state='running') as status: # generate the response # response = await session.generate_response(user_msg=user_request, files=files, detailed_error=True) response = await session.generate_response(user_msg=user_request, files=files, detailed_error=True) # output to the user print("AI: ", response.content) full_response = response.content ### full_response = "this is full response" # for file in response.files: for i, file in enumerate(response.files): # await file.asave(f"/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Coding/code_interpreter/output{i}.png") ##working. # st.image(file.get_image()) #! working. # * 注意这里的设定,可以提高图片的精细程度。 st.image(file.get_image(), width=None, output_format='PNG') # message_placeholder.markdown(full_response + "▌") ## orignal code. # message_placeholder.markdown(full_response) ## orignal code. st.write(full_response) status.update(label='complete', state='complete') # TODO: 确认是否要记录所有的full response。 st.session_state.messages.append( {"role": "assistant", "content": full_response}) await session.astop() # ! 确认需要关闭。 # st.session_state.messages.append({"role": "assistant", "content": full_response}) ### authentication with a local yaml file. import yaml from yaml.loader import SafeLoader with open('./config.yaml') as file: config = yaml.load(file, Loader=SafeLoader) authenticator = stauth.Authenticate( config['credentials'], config['cookie']['name'], config['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized'] ) ###'''authentication with a remove cloud-based database.''' # authentication with a remove cloud-based database. # 导入云端用户数据库。 # DETA_KEY = "c0zegv33efm_4MBTaoQAn76GzUfsZeKV64Uh9qMY3WZb" # load_dotenv(".env") # DETA_KEY = os.getenv("DETA_KEY") # print(DETA_KEY) # deta = Deta(DETA_KEY) # # mybase is the name of the database in Deta. You can change it to any name you want. # credentials = {"usernames":{}} # users = [] # email = [] # passwords = [] # names = [] # for row in db.fetch_all_users(): # users.append(row["username"]) # email.append(row["email"]) # names.append(row["key"]) # passwords.append(row["password"]) # hashed_passwords = stauth.Hasher(passwords).generate() ## 需要严格的按照yaml文件的格式来定义如下几个字段。 # for un, name, pw in zip(users, names, hashed_passwords): # # user_dict = {"name":name,"password":pw} # user_dict = {"name": un, "password": pw} # # credentials["usernames"].update({un:user_dict}) # credentials["usernames"].update({un: user_dict}) # ## sign-up模块,未完成。 # database_table = [] # # print(pd.DataFrame(credentials)) # for i in credentials['usernames'].keys(): # # print("i:",i) # # print("name",credentials['usernames'][i]['name']) # # print("password",credentials['usernames'][i]['password']) # database_table.append([i,credentials['usernames'][i]['name'],credentials['usernames'][i]['password']]) # print("database_table:",database_table) # authenticator = stauth.Authenticate( # credentials=credentials, cookie_name="joeshi_gpt", key='abcedefg', cookie_expiry_days=30) # ## sign-up widget,未完成。 # try: # if authenticator.register_user('新用户注册', preauthorization=False): # # for list in database_table: # # db.update_user(username=list[0], name=list[1], password=list[2]) # db.update_user(username=list[-1][0], name=list[-1][1], password=list[-1][2]) # # st.success('User registered successfully') # st.success('注册成功!') # except Exception as e: # st.error(e) '''''' # user, authentication_status, username = authenticator.login('用户登录', 'main') user, authentication_status, username = authenticator.login('用户登录', 'sidebar') # print("name", name, "username", username) if authentication_status: with st.sidebar: st.markdown( """