File size: 12,724 Bytes
21f9899 7fe89de 21f9899 7fe89de 21f9899 7fe89de 21f9899 7fe89de 21f9899 7fe89de 21f9899 9a54855 21f9899 7fe89de 21f9899 b6c88e7 7fe89de 21f9899 7fe89de 21f9899 7fe89de 21f9899 7fe89de 21f9899 2933858 7fe89de 21f9899 7fe89de 21f9899 2933858 21f9899 2933858 7fe89de 21f9899 7fe89de 21f9899 7fe89de 21f9899 7fe89de 2933858 21f9899 7fe89de 21f9899 7fe89de 21f9899 7fe89de 21f9899 7fe89de 21f9899 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 |
'''
参考: https://github.com/shroominic/codeinterpreter-api
1. 可以存在本地,然后再调出来。 working.
1. 可以在临时文件夹中读取文件。
1. 可以直接在内存中读出图片。
1. 中文字体成功。
from matplotlib.font_manager import FontProperties
myfont=FontProperties(fname='/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Coding/code_interpreter/rawdata/SimHei.ttf')
sns.set_style('whitegrid',{'font.sans-serif':['simhei','Arial']})
'''
# TODO:
from codeinterpreterapi import CodeInterpreterSession, File
import streamlit as st
from codeinterpreterapi import CodeInterpreterSession
import openai
import os
import matplotlib.pyplot as plt
import xlrd
import pandas as pd
# from io import StringIO
# import csv
import tempfile
from tempfile import NamedTemporaryFile
import pathlib
from pathlib import Path
from matplotlib.font_manager import FontProperties
import seaborn as sns
# --- Runtime configuration ---------------------------------------------------
# Reuse the user-supplied token both for the openai SDK and for libraries that
# read OPENAI_API_KEY from the environment (e.g. codeinterpreterapi).
os.environ["OPENAI_API_KEY"] = os.environ['user_token']
openai.api_key = os.environ['user_token']
os.environ["VERBOSE"] = "True"  # verbose mode so detailed errors are visible
# # * Enable the proxy settings below if the API endpoint is unreachable.
# openai.proxy = {
#     "http": "http://127.0.0.1:7890",
#     "https": "http://127.0.0.1:7890"
# }
# --- Page layout --------------------------------------------------------------
st.title("个人大语言模型商业智能中心")
st.subheader("Artificial Intelligence Backend Center for Individuals")
# NOTE(review): these selectors were moved into main(); kept as history.
# col1, col2 = st.columns(spec=[1, 2])
# radio_1 = col1.radio(label='ChatGPT版本', options=[
# 'GPT-3.5', 'GPT-4.0'], horizontal=True, label_visibility='visible')
# radio_2 = col2.radio(label='模式选择', options=[
# '核心模式', '联网模式', '数据模式'], horizontal=True, label_visibility='visible')
# File picker: only tabular formats are accepted.
uploaded_file = st.file_uploader(
"选择一个文件", type=(["csv", "xlsx", "xls"]))
# --- Uploaded-file handling ---------------------------------------------------
# Preview the upload, then persist it into a temp directory so the
# code-interpreter session can reference it by absolute path.
if uploaded_file is not None:
    filename = uploaded_file.name
    # st.write(filename)  # print out the whole file name to validate.
    try:
        # Show only the first three rows as a sanity-check preview.
        # Use endswith rather than a substring test so a name like
        # "report.csv.xlsx" is routed to the Excel reader, not pandas.read_csv.
        if filename.endswith('.csv'):
            csv_file = pd.read_csv(uploaded_file)
            st.write(csv_file[:3])
        else:
            xls_file = pd.read_excel(uploaded_file)
            st.write(xls_file[:3])
    except Exception as e:
        # Surface parsing problems to the user instead of crashing the app.
        st.write(e)

    uploaded_file_name = "File_provided"
    # NOTE: keep `temp_dir` referenced at module scope — the directory is
    # deleted as soon as the TemporaryDirectory object is garbage-collected.
    temp_dir = tempfile.TemporaryDirectory()
    uploaded_file_path = pathlib.Path(temp_dir.name) / uploaded_file_name
    with open(uploaded_file_path, 'wb') as output_temporary_file:
        # getvalue() yields the complete in-memory bytes of the upload;
        # this is the form that can be written into the temporary folder.
        output_temporary_file.write(uploaded_file.getvalue())
    st.write(uploaded_file_path)  # display the path so its existence can be verified
import requests

bing_search_api_key = os.environ['bing_api_key']
bing_search_endpoint = 'https://api.bing.microsoft.com/v7.0/search'


def search(query):
    """Query the Bing Web Search API and return the web-page hits.

    Parameters
    ----------
    query : str
        The search terms.

    Returns
    -------
    list[dict]
        The ``webPages.value`` entries of the Bing v7 response; each entry
        carries at least ``name``, ``url`` and ``snippet`` (consumed by the
        internet mode in ``main``).

    Raises
    ------
    Exception
        Any network or HTTP error from the underlying request is re-raised
        to the caller.
    """
    # mkt = 'en-EN'
    mkt = 'zh-CN'  # bias results toward the Simplified-Chinese market
    params = {'q': query, 'mkt': mkt}
    headers = {'Ocp-Apim-Subscription-Key': bing_search_api_key}
    # Call the API.
    try:
        # timeout keeps a dead endpoint from hanging the Streamlit UI forever.
        response = requests.get(
            bing_search_endpoint, headers=headers, params=params, timeout=30)
        response.raise_for_status()  # turn 4xx/5xx into an exception
        # Renamed from `json` to avoid shadowing the stdlib module name.
        payload = response.json()
        return payload["webPages"]["value"]
    except Exception:
        raise  # bare raise preserves the original traceback
# openai.api_key = st.secrets["OPENAI_API_KEY"]
# async def main():
async def main():
    """Drive one Streamlit rerun of the chat UI.

    Renders the model/mode selectors, replays the chat history held in
    ``st.session_state``, and handles a new user prompt in one of three modes:

    * 数据模式 — run the prompt through a ``CodeInterpreterSession`` against
      the uploaded file; generated charts are rendered inline.
    * 联网模式 — prepend the top Bing search results to the prompt, then
      stream a ChatCompletion answer.
    * 核心模式 — plain streamed ChatCompletion over the session history.
    """
    col1, col2 = st.columns(spec=[1, 2])
    radio_1 = col1.radio(label='ChatGPT版本', options=[
        'GPT-3.5', 'GPT-4.0'], horizontal=True, label_visibility='visible')
    radio_2 = col2.radio(label='模式选择', options=[
        '核心模式', '联网模式', '数据模式'], horizontal=True, label_visibility='visible')

    # Set a default model.
    if "openai_model" not in st.session_state:
        st.session_state["openai_model"] = "gpt-3.5-turbo-16k"
    if radio_1 == 'GPT-3.5':
        print('radio_1: GPT-3.5 starts!')
        st.session_state["openai_model"] = "gpt-3.5-turbo-16k"
    else:
        print('radio_1: GPT-4.0 starts!')
        st.session_state["openai_model"] = "gpt-4"

    # Initialize chat history.
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Display chat messages from history on app rerun.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    # Display assistant response in chat message container.
    if prompt := st.chat_input("Ask something?"):
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""

        # BUG FIX: the original compared against '数据分析模式', which is not
        # one of the radio options ('核心模式', '联网模式', '数据模式'), so the
        # data-analysis branch could never be reached.
        if radio_2 == '数据模式':
            print('数据分析模式启动!')
            # Clear cache to avoid any potential history problems.
            st.cache_resource.clear()
            with st.chat_message("assistant"):
                async with CodeInterpreterSession() as session:
                    #! Charts are requested at dpi=300 for high-quality output.
                    environ_settings = """【背景要求】如果我没有告诉你任何定制化的要求,那么请你按照以下的默认要求来回答:
-------------------------------------------------------------------------
1. 你需要用提问的语言来回答(如:中文提问你就用中文来回答,英文提问你就用英文来回答)。
2. 如果要求你输出图表,那么图的解析度dpi需要设定为300。图尽量使用seaborn库。seaborn库的参数设定:sns.set(rc={'axes.facecolor':'#FFF9ED','figure.facecolor':'#FFF9ED'}, palette='dark'。
3. 如果需要显示中文,那么设置如下:
3.1 首先,你需要安装中文字体:
myfont=FontProperties(fname='/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Coding/code_interpreter/rawdata/SimHei.ttf')
3.2 然后,你需要设定在matplotlib(plt)和seaborn(sns)中设定:
sns.set_style({'font.sans-serif':['Arial','SimHei']})
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['font.family']='sans-serif'
plt.title(fontsize = 18)
-------------------------------------------------------------------------
"""  # seaborn palette options: deep, muted, pastel, bright, dark, colorblind, Spectral. See https://seaborn.pydata.org/generated/seaborn.color_palette.html.
                    user_request = environ_settings + "\n\n" + \
                        "你需要完成以下任务:\n\n" + prompt + \
                        f"注:文件位置在{uploaded_file_path}"
                    print('user_request: \n', user_request)

                    # Attach the uploaded file persisted to the temp path above.
                    files = [File.from_path(str(uploaded_file_path))]
                    with st.status('processing...', expanded=True, state='running') as status:
                        # Generate the response.
                        response = await session.generate_response(
                            user_request, files=files
                        )
                        # Output to the user.
                        print("AI: ", response.content)
                        full_response = response.content
                        for i, file in enumerate(response.files):
                            # * width=None + PNG keeps charts at full resolution.
                            st.image(file.get_image(), width=None,
                                     output_format='PNG')
                        st.write(full_response)
                        status.update(label='complete', state='complete')
                    await session.astop()  #! confirmed: the session must be closed.
            # st.session_state.messages.append({"role": "assistant", "content": full_response})
        elif radio_2 == '联网模式':
            input_message = prompt
            internet_search_result = search(input_message)
            search_prompt = [
                f"Source:\nTitle: {result['name']}\nURL: {result['url']}\nContent: {result['snippet']}"
                for result in internet_search_result]
            # Only the top 3 search results are injected into the prompt.
            prompt = "基于如下的互联网公开信息, 回答问题:\n\n" + "\n\n".join(search_prompt[:3]) + "\n\n问题: " + input_message + "你需要注意的是回答问题时必须用提问的语言(如英文或者中文)来提示:'答案基于互联网公开信息。'" + "\n\n答案: "
            st.session_state.messages.append({"role": "user", "content": prompt})
            for response in openai.ChatCompletion.create(
                model=st.session_state["openai_model"],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                stream=True,
            ):
                # Stream tokens into the placeholder with a cursor glyph.
                full_response += response.choices[0].delta.get("content", "")
                message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(full_response)
            st.session_state.messages.append(
                {"role": "assistant", "content": full_response})
        elif radio_2 == '核心模式':
            print('GPT only starts!!!')
            print('st.session_state now:', st.session_state)
            for response in openai.ChatCompletion.create(
                model=st.session_state["openai_model"],
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                stream=True,
            ):
                full_response += response.choices[0].delta.get("content", "")
                message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(full_response)
            st.session_state.messages.append(
                {"role": "assistant", "content": full_response})
if __name__ == "__main__":
    # * The app can also be launched with: `streamlit run frontend/app.py`
    import asyncio

    asyncio.run(main())
|