##TODO: 1. Handle multiple images. 2. Known bug: in data mode, the '清除所有记录' (clear all records) button has to be clicked once, or the input re-entered, before the app responds.

# -*- coding: utf-8 -*-
import requests
import streamlit as st
import openai
import os
import datetime
from dotenv import load_dotenv
import numpy as np
import pandas as pd
import csv
import tempfile
from tempfile import NamedTemporaryFile
import pathlib
from pathlib import Path
import re
from re import sub
import matplotlib.pyplot as plt
from itertools import product
from tqdm import tqdm_notebook, tqdm, trange
import time
from time import sleep
import seaborn as sns
from matplotlib.pyplot import style
from rich import print
import autogen
from autogen import AssistantAgent
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
from st_reset_conversation import reset_all, reset_message
import asyncio
# import file
# import pyqt5
from PIL import Image  # type: ignore
import warnings
warnings.filterwarnings('ignore')
# sns.set()
import matplotlib
matplotlib.use('Agg') ## https://blog.51cto.com/u_15642578/5301647

load_dotenv()
### Set the OpenAI API key
os.environ["OPENAI_API_KEY"] = os.environ['user_token']
openai.api_key = os.environ['user_token']

# reset_all()

# st.title('Microsoft AutoGen - Streamlit Demo')

##NOTE: working. A prompt hint can be added to an agent's system_message in MS AutoGen, and it is not shown in the Streamlit UI.
time_stamp = str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) + str(np.random.randint(0, 1000000))

## The following two classes are essentially the same; the only difference is the base class.
class TrackableAssistantAgent(AssistantAgent):
    def _process_received_message(self, message, sender, silent):
        with st.chat_message(sender.name):  ## sender.name seems to be what Streamlit expects here.
            if "execution succeeded" in message:
                ## Show the generated chart first, if the agent saved one to the agreed path.
                if os.path.exists(f'/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Microsoft AutoGen/output_{time_stamp}.png'):
                    st.image(f'/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Microsoft AutoGen/output_{time_stamp}.png')
                st.markdown(message)
                st.markdown('The task has been completed. Please check the information shown on screen!')
                ## st.stop() halts the Streamlit script run here, so super() is only reached for intermediate messages.
                st.stop()
            else:
                st.markdown(message)

        return super()._process_received_message(message, sender, silent)

## Same as TrackableAssistantAgent above, just with a different base class.
class TrackableUserProxyAgent(UserProxyAgent):
    def _process_received_message(self, message, sender, silent):
        # with st.status('Thinking...', expanded=True, state='running') as status:
        with st.chat_message(sender.name):
            if "execution succeeded" in message:
                ## Show the generated chart first, if the agent saved one to the agreed path.
                if os.path.exists(f'/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Microsoft AutoGen/output_{time_stamp}.png'):
                    # st.markdown('Image found!!')
                    st.image(f'/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Microsoft AutoGen/output_{time_stamp}.png')
                st.markdown(message)
                st.markdown('The task has been completed. Please check the information shown on screen!')
                st.stop()
            else:
                st.markdown(message)
        return super()._process_received_message(message, sender, silent)




# async def auto_gen(uploaded_file_path):  ## the async version must be started from the main program with asyncio.run(st_msautogen.auto_gen(uploaded_file_path)); see the usage sketch at the end of this file.
def auto_gen(uploaded_file_path):
    # st.title('Microsoft AutoGen - Streamlit Demo')

    ##TODO: is the code block below actually needed?
    ### Initialize chat history   
    if "messages" not in st.session_state:
        st.session_state.messages = []

    ### Display chat messages from history on app rerun
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])
    # print('msautogen uploaded_file_path:', uploaded_file_path)
    print('running time:', datetime.datetime.now().strftime("%H:%M:%S.%f"))

    prompt = st.chat_input("Say something...")
    # print('prompt:', prompt)

    system_prompt = f"""You are a helpful AI assistant. Solve tasks using your coding and language skills. In the following cases, suggest python code (in a python coding block) or shell script (in a sh coding block) for the user to execute.
    1. When you need to collect info, use the code to output the info you need, for example, browse or search the web, download/read a file, print the content of a webpage or a file, get the current date/time, check the operating system. After sufficient info is printed and the task is ready to be solved based on your language skill, you can solve the task by yourself.
    2. When you need to perform some task with code, use the code to perform the task and output the result. Finish the task smartly. Solve the task step by step if you need to. If a plan is not provided, explain your plan first. Be clear which step uses code, and which step uses your language skill. When using code, you must indicate the script type in the code block. The user cannot provide any other feedback or perform any other action beyond executing the code you suggest. The user can't modify your code. So do not suggest incomplete code which requires users to modify. Don't use a code block if it's not intended to be executed by the user. If you want the user to save the code in a file before executing it, put # filename: <filename> inside the code block as the first line. Don't include multiple code blocks in one response. Do not ask users to copy and paste the result. Instead, use 'print' function for the output when relevant. Check the execution result returned by the user. If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try. When you find an answer, verify the answer carefully. Include verifiable evidence in your response if possible.
    Reply "TERMINATE" in the end when everything is done.
    Other settings include:
    * If I ask you to analyze an uploaded file, the file is located at: {uploaded_file_path}
    * If you are asked to produce a chart, set the figure resolution to dpi=300 and draw it with the seaborn library, configured with: 'axes.facecolor':'#FFF9ED', 'figure.facecolor':'#FFF9ED', palette='dark'.
    * If you are asked to output an image, save it to the following path: '/Users/yunshi/Downloads/360Data/Data Center/Working-On Task/演讲与培训/2023ChatGPT/Microsoft AutoGen/output_{time_stamp}.png'.
    * When answering, show the charts corresponding to the analysis whenever possible, together with the analysis results, presented in the following format:
        1. Detailed, professional analysis results, with sufficient supporting evidence.
        2. The possible causes that could lead to this result.
        All of the above must be given as numbered lists.
    """
    
    if prompt:
        llm_config = {
            # "request_timeout": 600,
            "config_list":[{
                "model": "gpt-4-1106-preview",
                # "model": "gpt-4",
                # "model": "gpt-3.5-turbo-16k",
                "api_key": os.environ["OPENAI_API_KEY"],
            }], ## Note the list-of-dicts format here; otherwise later output will raise an error.
        }
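        ## Alternative sketch (not used here, shown only for reference): the config_list_from_json
        ## helper imported above can load the same "config_list" structure from a file or an
        ## environment variable instead of hard-coding it. "OAI_CONFIG_LIST" below is the
        ## conventional AutoGen name, an assumption rather than something this script requires:
        # config_list = config_list_from_json("OAI_CONFIG_LIST")
        # llm_config = {"config_list": config_list}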

        ## check if the prompt has been passed in. 
        print('prompt after llm_config:', prompt)
        
        ## create an assistant.
        assistant = TrackableAssistantAgent(name='assistant', llm_config=llm_config, system_message=system_prompt)

        ## create a user proxy. 
        ### see notes at https://microsoft.github.io/autogen/docs/FAQ#use-the-constructed-configuration-list-in-agents
        user_proxy = TrackableUserProxyAgent(name='user',human_input_mode='NEVER',llm_config=llm_config)

        ## Create a fresh event loop: the Streamlit script thread has no default loop,
        ## so one is created and registered before driving the async chat below.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        ### define an async function.
        async def start_chat():
            try:
                # print('333')
                await user_proxy.a_initiate_chat(assistant, message=prompt, silent=True)
                # user_proxy.a_initiate_chat(assistant, message=prompt, silent=True) ## not working, the function not even start up. 
                print('444')
            except Exception as e:
                print('start_chat function not working because:', e)
                pass
        
        print('111')
        ## run the async function within the event loop.
        loop.run_until_complete(start_chat())
        print('222')
        
        # #TODO: try to stop a running loop inside async code.
        loop.close()

# auto_gen()
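
## Caller-side usage sketch (the main Streamlit app and the module name st_msautogen are
## assumptions based on the import hint on auto_gen above; the uploaded-file path is supplied
## by the front end, typically a temp file path):
##   import st_msautogen
##   st_msautogen.auto_gen(uploaded_file_path)                        # synchronous version defined above
##   # asyncio.run(st_msautogen.auto_gen(uploaded_file_path))         # if the async variant is restored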