ashm committed on
Commit
d4ea1a4
·
1 Parent(s): a39df16

Upload 21 files

Browse files
Dockerfile ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.9

# Run as a non-root user (required by e.g. Hugging Face Spaces).
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

WORKDIR $HOME/app

# Copy requirements first so the dependency layer is cached independently
# of application-code changes.
# BUG FIX: the previous "COPY ./requirements.txt ~/app/requirements.txt"
# used "~", which Docker does NOT expand — it created a literal "~" dir.
COPY --chown=user ./requirements.txt $HOME/app/requirements.txt
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application (the old file had two redundant COPYs).
COPY --chown=user . $HOME/app

CMD ["chainlit", "run", "app.py", "--port", "7860"]
__pycache__/app.cpython-39.pyc ADDED
Binary file (4.43 kB). View file
 
app.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import openai
2
+ import json
3
+ import ast
4
+ import os
5
+ import chainlit as cl
6
+ from functions.FunctionManager import FunctionManager
7
+ import inspect
8
+ import os
9
+ import tiktoken
10
+ import importlib
11
+ import json
12
+ from chainlit import user_session
13
+
14
# Plugin discovery: every subdirectory of ./plugins (except __pycache__) is a
# plugin whose callable tools live in plugins/<name>/functions.py.
plugin_dirs = [d for d in os.listdir('plugins')
               if os.path.isdir(os.path.join('plugins', d)) and d != '__pycache__']

# Accumulates every tool function exported by the enabled plugins.
functions = []

# Iterate through each subdirectory (i.e., each plugin).
for dir in plugin_dirs:  # NOTE: `dir` shadows the builtin of the same name
    # A plugin may opt out via {"enabled": false} in its config.json.
    try:
        with open(f'plugins/{dir}/config.json', 'r') as f:
            config = json.load(f)
            enabled = config.get('enabled', True)
    except FileNotFoundError:
        # No configuration file: the plugin is enabled by default.
        enabled = True

    # Skip plugins that are explicitly disabled.
    if not enabled:
        continue

    # Dynamically import the plugin's functions module.
    module = importlib.import_module(f'plugins.{dir}.functions')

    # Register every top-level function the module defines.
    functions.extend([
        obj for name, obj in inspect.getmembers(module)
        if inspect.isfunction(obj)
    ])

# Single registry used for both schema generation and dispatch.
function_manager = FunctionManager(functions=functions)
print("functions:", function_manager.generate_functions_array())

# Token budget for the conversation sent to the model (see
# __truncate_conversation below).
max_tokens = 5000
48
+
49
+
50
def __truncate_conversation(conversation) -> list:
    """
    Return a copy of *conversation* trimmed to fit within ``max_tokens``.

    The first (system) message is always kept; after it, the oldest
    messages are dropped one at a time until the token count fits or only
    one non-system message remains.

    Parameters:
        conversation: list of OpenAI chat message dicts; index 0 must be
            the system message.

    Returns:
        A new list of message dicts within the token budget.
        (BUG FIX: the annotation previously said ``-> None`` although the
        function returns the trimmed list.)
    """
    # Set the system prompt aside so it can never be dropped.
    system_con = conversation[0]
    conversation = conversation[1:]
    # Drop at index 1 (not 0) so the very first user message stays anchored.
    while (get_token_count(conversation) > max_tokens
           and len(conversation) > 1):
        conversation.pop(1)
    # Put the system prompt back at the front.
    conversation.insert(0, system_con)
    return conversation
68
+
69
# Token counting follows the OpenAI cookbook recipe:
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb


def get_token_count(conversation) -> int:
    """
    Estimate the prompt-token footprint of *conversation* in the
    gpt-3.5-turbo chat format.
    """
    encoding = tiktoken.encoding_for_model('gpt-3.5-turbo')

    # Each message is framed as <im_start>{role/name}\n{content}<im_end>\n,
    # which costs 4 tokens of overhead; when a "name" field is present the
    # role is omitted, saving 1 token.
    total = sum(
        4 + sum(
            len(encoding.encode(str(field))) - (1 if key == "name" else 0)
            for key, field in message.items()
        )
        for message in conversation
    )
    # Every reply is primed with <im_start>assistant (2 tokens).
    return total + 2
88
+
89
+
90
# Cap on model/tool round-trips per user turn, so a model that keeps
# requesting function calls cannot loop forever.
MAX_ITER = 5


async def on_message(user_message: object):
    """
    Drive one user turn: stream a chat completion, execute any function
    call the model requests, feed the result back, and repeat (up to
    MAX_ITER times) until the model produces a final answer.
    """
    print("==================================")
    print(user_message)
    print("==================================")
    user_message = str(user_message)
    # Per-session history initialized by start_chat().
    message_history = cl.user_session.get("message_history")
    message_history.append({"role": "user", "content": user_message})

    cur_iter = 0

    while cur_iter < MAX_ITER:

        # OpenAI call
        openai_message = {"role": "", "content": ""}
        function_ui_message = None
        content_ui_message = cl.Message(content="")
        stream_resp = None
        # Trim the history to the token budget before sending.
        send_message = __truncate_conversation(message_history)
        try:
            async for stream_resp in await openai.ChatCompletion.acreate(
                    model="gpt-3.5-turbo",
                    messages=send_message,
                    stream=True,
                    function_call="auto",
                    functions=function_manager.generate_functions_array(),
                    temperature=0.3
            ):  # type: ignore
                # Fold each streamed delta into the accumulated message
                # and the live UI messages.
                new_delta = stream_resp.choices[0]["delta"]
                openai_message, content_ui_message, function_ui_message = await process_new_delta(
                    new_delta, openai_message, content_ui_message,
                    function_ui_message)
        except Exception as e:
            # NOTE(review): any API/stream error is printed and silently
            # retried on the next iteration — consider real logging.
            print(e)
            cur_iter += 1
            continue

        # No chunk ever arrived: nothing to process, give up this turn.
        if stream_resp is None:
            break

        message_history.append(openai_message)
        if function_ui_message is not None:
            await function_ui_message.send()

        if stream_resp.choices[0]["finish_reason"] == "stop":
            break
        elif stream_resp.choices[0]["finish_reason"] != "function_call":
            raise ValueError(stream_resp.choices[0]["finish_reason"])
        # if code arrives here, it means there is a function call
        function_name = openai_message.get("function_call").get("name")
        print(openai_message.get("function_call"))
        try:
            arguments = json.loads(
                openai_message.get("function_call").get("arguments"))
        except:
            # Fallback for non-strict JSON (e.g. single-quoted) argument
            # strings emitted by the model.
            arguments = ast.literal_eval(
                openai_message.get("function_call").get("arguments"))

        function_response = await function_manager.call_function(
            function_name, arguments)
        # print(function_response)

        # Feed the tool result back to the model as a "function" message.
        message_history.append({
            "role": "function",
            "name": function_name,
            "content": function_response,
        })

        # Mirror the tool result in the chat UI as a nested JSON message.
        await cl.Message(
            author=function_name,
            content=str(function_response),
            language="json",
            indent=1,
        ).send()
        cur_iter += 1
167
+
168
+
169
async def process_new_delta(new_delta, openai_message, content_ui_message,
                            function_ui_message):
    """
    Merge one streamed completion delta into the accumulated message and
    the chat UI.

    Returns the (possibly replaced) accumulated openai_message, the
    content UI message, and the function-call UI message so the caller
    can thread them through subsequent deltas.
    """
    if "role" in new_delta:
        openai_message["role"] = new_delta["role"]
    if "content" in new_delta:
        # content may be explicitly null in a delta — treat it as "".
        new_content = new_delta.get("content") or ""
        openai_message["content"] += new_content
        await content_ui_message.stream_token(new_content)
    if "function_call" in new_delta:
        # The function name arrives first (and only once). When it does,
        # flush the content message and open a nested UI message that the
        # streamed arguments will be appended to.
        if "name" in new_delta["function_call"]:
            openai_message["function_call"] = {
                "name": new_delta["function_call"]["name"]
            }
            await content_ui_message.send()
            function_ui_message = cl.Message(
                author=new_delta["function_call"]["name"],
                content="",
                indent=1,
                language="json")
            await function_ui_message.stream_token(
                new_delta["function_call"]["name"])

        # Argument text streams in fragments across many deltas;
        # concatenate them into one JSON-ish string.
        if "arguments" in new_delta["function_call"]:
            if "arguments" not in openai_message["function_call"]:
                openai_message["function_call"]["arguments"] = ""
            openai_message["function_call"]["arguments"] += new_delta[
                "function_call"]["arguments"]
            await function_ui_message.stream_token(
                new_delta["function_call"]["arguments"])
    return openai_message, content_ui_message, function_ui_message
199
+
200
+
201
@cl.on_chat_start
def start_chat():
    """Seed the per-session message history with the system prompt."""
    system_message = {
        "role": "system",
        "content": """
you are now chatting with an AI assistant. The assistant is helpful, creative, clever, and very friendly.
"""
    }
    cl.user_session.set("message_history", [system_message])
212
+
213
+
214
@cl.on_message
async def run_conversation(user_message: object):
    """Chainlit entry point: delegate each incoming message to on_message."""
    await on_message(user_message)
chainlit.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Welcome to Chainlit! 🚀🤖
2
+
3
+ Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
4
+
5
+ ## Useful Links 🔗
6
+
7
+ - **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
8
+ - **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/ZThrUxbAYw) to ask questions, share your projects, and connect with other developers! 💬
9
+
10
+ We can't wait to see what you create with Chainlit! Happy coding! 💻😊
11
+
12
+ ## Welcome screen
13
+
14
+ To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
functions/FunctionManager.py ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import json
3
+ import re
4
+
5
+ import requests
6
+ import openai
7
+
8
+
9
class FunctionManager:
    """
    Registry of plugin callables that can render itself as an OpenAI
    function-calling schema and dispatch calls by name.
    """

    def __init__(self, functions=None):
        """
        functions: optional iterable of callables to register, keyed by
        their __name__.
        """
        self.functions = {}
        # Names that must never be exposed to the model.
        self.excluded_functions = {'inspect', 'create_engine'}
        if functions:
            for func in functions:
                self.functions[func.__name__] = func

    def add_function(self, func):
        """Register a single callable under its __name__."""
        self.functions[func.__name__] = func

    def generate_functions_array(self):
        """
        Build the OpenAI "functions" schema list from the registered
        callables: the name, the first docstring line as the description,
        and one JSON-schema property per annotated parameter (parameter
        descriptions are parsed from "param: text" docstring lines;
        parameters without defaults are marked required).
        """
        # Map Python annotation names to JSON-schema type names.
        type_mapping = {
            "str": "string",
            "int": "integer",
            "float": "number",
            "bool": "boolean",
            "list": "array",
            "dict": "object"
        }
        functions_array = []

        for function_name, function in self.functions.items():
            if function_name in self.excluded_functions:
                continue
            # Get the function's docstring and parameter list.
            docstring = function.__doc__
            parameters = inspect.signature(function).parameters

            # The first docstring line becomes the tool description.
            docstring_lines = docstring.strip().split(
                '\n') if docstring else []
            function_description = docstring_lines[0].strip(
            ) if docstring_lines else ''

            # Skeleton of the per-function schema entry.
            function_info = {
                "name": function_name,
                "description": function_description,
                "parameters": {
                    "type": "object",
                    "properties": {},
                    "required": []  # Add a required field
                }
            }

            for parameter_name, parameter in parameters.items():
                # Unannotated parameters are not exposed in the schema.
                parameter_annotation = parameter.annotation
                if parameter_annotation == inspect.Parameter.empty:
                    continue

                # If the annotation is a type, use its name; if it is a
                # string annotation, use it directly.
                if isinstance(parameter_annotation, type):
                    parameter_annotation_name = parameter_annotation.__name__.lower(
                    )
                else:
                    parameter_annotation_name = parameter_annotation.lower()

                # Extract the parameter description from a docstring line
                # of the form "<name>: <description>".
                param_description_pattern = rf"{parameter_name}: (.+)"
                param_description_match = [
                    re.search(param_description_pattern, line)
                    for line in docstring_lines
                ]
                param_description = next(
                    (match.group(1)
                     for match in param_description_match if match), '')

                # Add the parameter's schema entry.
                parameter_description = {
                    "type":
                    type_mapping.get(parameter_annotation_name,
                                     parameter_annotation_name),
                    "description":
                    param_description
                }
                function_info["parameters"]["properties"][
                    parameter_name] = parameter_description

                # If the parameter has no default value, add it to the required field.
                if parameter.default == inspect.Parameter.empty:
                    function_info["parameters"]["required"].append(
                        parameter_name)

            functions_array.append(function_info)

        return functions_array

    async def call_function(self, function_name, args_dict):
        """
        Await the registered coroutine *function_name* with **args_dict.

        Raises:
            ValueError: if the name is not registered.

        Tuple/list/dict results are JSON-serialized so they can be placed
        directly in a chat "function" message.
        """
        if function_name not in self.functions:
            raise ValueError(f"Function '{function_name}' not found")

        function = self.functions[function_name]
        # e.g. {"role": "function", "name": "get_current_weather", "content": "{...}"}
        print(function, args_dict)
        res = await function(**args_dict)
        # Serialize structured results to a JSON string.
        if isinstance(res, (tuple, list, dict)):
            res = json.dumps(res)
        return res
111
+
112
+
113
# Test helper: returns canned weather data (no real API call).
def get_current_weather(location: str, unit: str = "celsius"):
    """
    Get the current weather in a given location.

    Parameters:
    - location: The city and state, e.g. San Francisco, CA
    - unit: The unit of temperature (celsius or fahrenheit)
    """
    canned_report = {
        "temperature": "22",
        "unit": "celsius",
        "description": "Sunny",
    }
    return canned_report
123
+
124
+
125
# Fetch the raw HTML of a web page given its URL.
def get_html(url: str):
    """
    Get the html content of the url.if user provide the url,then return the html content of the url.
    Parameters:
    url: The url of the website. (required)
    """
    # Spoof a desktop-browser User-Agent so naive bot filters let us through.
    browser_headers = {
        'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) '
                       'AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/91.0.4472.114 Safari/537.36')
    }
    page = requests.get(url, headers=browser_headers)
    return page.text
142
+
143
+
144
def search_by_bard(content: str):
    """
    Search the content(translate to English language) by bard.if the input content that you don't know how to say, you can use this function.
    Parameters:
    content: The content to search.please change the content language to English.(required)
    """
    # NOTE(review): "bard" is not an official OpenAI model name; this
    # presumably targets a proxy exposing Bard behind the OpenAI API —
    # confirm the configured api_base before relying on it.
    print(content)
    response = openai.ChatCompletion.create(model="bard",
                                            messages=[{
                                                'role': 'user',
                                                'content': content
                                            }],
                                            stream=False,
                                            temperature=0)
    print(response)
    # Return only the assistant's text, wrapped for call_function().
    return {'content': response['choices'][0]['message']['content']}
160
+
161
+
162
if __name__ == "__main__":
    # Smoke test: render the schema for a single function and print it.
    manager = FunctionManager(functions=[search_by_bard])
    print(manager.generate_functions_array())

    # call_function is a coroutine; invoking it here would need an event
    # loop, so only schema generation is exercised.
functions/__init__.py ADDED
File without changes
functions/__pycache__/FunctionManager.cpython-39.pyc ADDED
Binary file (4.42 kB). View file
 
functions/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (198 Bytes). View file
 
plugins/.DS_Store ADDED
Binary file (6.15 kB). View file
 
plugins/common/__pycache__/functions.cpython-39.pyc ADDED
Binary file (1.8 kB). View file
 
plugins/common/config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "enabled": true
3
+ }
plugins/common/functions.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import chainlit as cl
3
+
4
+
5
async def need_file_upload():
    """
    When the user's question mentions handling files, you need to upload files, you can call this function.
    Parameters: None
    """
    # NOTE(review): the docstring above doubles as the tool description
    # sent to the model (see FunctionManager) — keep its wording stable.
    # Ensure the scratch directory for uploads exists.
    if not os.path.exists('./tmp'):
        os.mkdir('./tmp')
    # Block until the user uploads a file through the chat UI.
    files = await cl.AskFileMessage(
        content="Please upload a text file to begin!",
        max_size_mb=50,
        accept=[
            "text/plain",
            "image/png",
            "image/jpeg",
            "application/pdf",
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",  # for .xlsx files
            "application/vnd.ms-excel",  # for .xls files
            "text/csv",  # for .csv files
            # More MIME types here as needed.
        ]).send()
    file = files[0]

    # Persist the upload under ./tmp so other plugins can read it by path.
    file_path = f"./tmp/{file.name}"
    content = file.content
    file_name = file.name
    file_type = file.type
    # content is bytes, so write in binary mode.
    with open(file_path, "wb") as f:
        f.write(content)
    # Returned dict is JSON-serialized and handed back to the model.
    return {
        'type': 'file',
        'path': file_path,
        'name': file_name,
        'file_type': file_type
    }
44
+
45
+
46
async def show_images(paths: str):
    """
    If your return contains images in png or jpg format, you can call this function to display the images.
    Parameters: paths: The paths of the images as a comma-separated string.(required)
    """
    # Build one inline chainlit Image element per comma-separated path.
    elements = []
    for idx, raw_path in enumerate(paths.split(',')):
        image = cl.Image(name=f"image{idx}",
                         path=raw_path.strip(),
                         display="inline")
        image.size = "large"
        elements.append(image)

    await cl.Message(content="",
                     elements=elements).send()  # type: ignore

    # Tell the model the images are already rendered so it does not try to
    # show them again in its textual reply (message kept verbatim).
    return {'description': '图片已经显示成功了,下面的回复中不再需要展示它了'}
plugins/python/__pycache__/executor.cpython-39.pyc ADDED
Binary file (2.85 kB). View file
 
plugins/python/__pycache__/functions.cpython-39.pyc ADDED
Binary file (2.74 kB). View file
 
plugins/python/config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "enabled": true
3
+ }
plugins/python/executor.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ import sys
3
+ import io
4
+ import ast
5
+ import subprocess
6
+ from contextlib import redirect_stdout
7
+ from loguru import logger
8
+
9
# Route loguru output to stderr with a colorized, timestamped format.
logger.configure(
    handlers=[
        {
            "sink": sys.stderr,
            "format": "<green>{time}</green> <level>{message}</level>",
            "colorize": True,
        }
    ]
)
18
+
19
+
20
class Executor(abc.ABC):
    """Interface for language-specific code runners."""

    @abc.abstractmethod
    def execute(self, code: str) -> str:
        """Run *code* and return its captured output."""
        ...
24
+
25
+
26
class PythonExecutor(Executor):
    # Class-level dict shared by every call: it is the persistent REPL
    # namespace, so variables defined by one execute() are visible to the
    # next.
    locals = {}

    def execute(self, code: str) -> str:
        """
        Execute *code* statement by statement, capturing stdout.

        Bare expressions have their value printed (REPL-style). On any
        exception the exception text is returned instead of the output.

        SECURITY NOTE(review): exec()/eval() on the given string with no
        sandboxing — callers pass model-generated code.
        NOTE(review): expression nodes are run twice (once by exec, once
        by eval below), so expressions with side effects fire twice.
        """
        logger.info("Executing Python code: {}", code)
        output = io.StringIO()

        # Parse the code into an AST.
        tree = ast.parse(code, mode="exec")

        try:
            # Redirect standard output to our StringIO instance.
            with redirect_stdout(output):
                for node in tree.body:
                    # Compile and execute each node.
                    exec(
                        compile(
                            ast.Module(body=[node], type_ignores=[]), "<ast>", "exec"
                        ),
                        None,
                        PythonExecutor.locals,
                    )

                    # If the node is an expression, print its result.
                    if isinstance(node, ast.Expr):
                        eval_result = eval(
                            compile(ast.Expression(body=node.value), "<ast>", "eval"),
                            None,
                            PythonExecutor.locals,
                        )
                        if eval_result is not None:
                            print(eval_result)
        except Exception as e:
            logger.error("Error executing Python code: {}", e)
            return str(e)

        # Retrieve the output and return it.
        return output.getvalue()
64
+
65
+
66
class CppExecutor(Executor):
    """Compile a C++ snippet with g++ and run the resulting binary."""

    def execute(self, code: str) -> str:
        # Write the snippet to a fixed source file in the working dir.
        with open("script.cpp", "w") as src:
            src.write(code)
        try:
            # Compile, then run the default a.out produced by g++.
            subprocess.run(["g++", "script.cpp"], check=True)
            result = subprocess.run(
                ["./a.out"], capture_output=True, text=True, check=True
            )
            return result.stdout
        except subprocess.CalledProcessError as err:
            # Re-raise with stderr attached so callers can see the message.
            raise subprocess.CalledProcessError(err.returncode, err.cmd, output=err.stderr)
79
+
80
+
81
class RustExecutor(Executor):
    """Compile a Rust snippet with rustc and run the resulting binary."""

    def execute(self, code: str) -> str:
        # Write the snippet to a fixed source file in the working dir.
        with open("script.rs", "w") as src:
            src.write(code)
        try:
            # rustc names the binary after the source file stem.
            subprocess.run(["rustc", "script.rs"], check=True)
            result = subprocess.run(
                ["./script"], capture_output=True, text=True, check=True
            )
            return result.stdout
        except subprocess.CalledProcessError as err:
            # Re-raise with stderr attached so callers can see the message.
            raise subprocess.CalledProcessError(err.returncode, err.cmd, output=err.stderr)
plugins/python/functions.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import subprocess
3
+ import sys
4
+ import chainlit as cl
5
+ import os
6
+ from .executor import PythonExecutor
7
+
8
+
9
async def python_exec(code: str, language: str = "python"):
    """
    Exexute code. \nNote: This endpoint current supports a REPL-like environment for Python only.\n\nArgs:\n request (CodeExecutionRequest): The request object containing the code to execute.\n\nReturns:\n CodeExecutionResponse: The result of the code execution.
    Parameters: code: (str, required): A Python code snippet for execution in a Jupyter environment, where variables and imports from previously executed code are accessible. The code must not rely on external variables/imports not available in this environment, and must print a dictionary `{"type": "<type>", "path": "<path>", "status": "<status>"}` as the last operation. `<type>` can be "image", "file", or "content", `<path>` is the file path (not needed if `<type>` is "content"), `<status>` indicates execution status. Display operations should save output as a file with path returned in the dictionary. If tabular data is generated, it should be directly returned as a string. The code must end with a `print` statement.the end must be print({"type": "<type>", "path": "<path>", "status": "<status>"})
    """
    # NOTE(review): the docstring above is the tool schema sent to the
    # model (parsed by FunctionManager) — its wording, including the typo
    # "Exexute", is runtime data; do not reword casually.
    # SECURITY NOTE(review): this executes model-generated Python in
    # process via PythonExecutor's exec(), with no sandboxing.

    myexcutor = PythonExecutor()
    code_output = myexcutor.execute(code)
    print(f"REPL execution result: {code_output}")
    # Wrap the captured stdout for call_function() serialization.
    response = {"result": code_output.strip()}
    return response
+
21
async def need_install_package(package_name: str) -> dict:
    """
    If the user's question mentions installing packages, and the packages need to be installed,
    you can call this function.
    Parameters: package_name: The name of the package.(required)
    """
    # check if package is already installed
    # NOTE(review): `pip show` is run synchronously via Popen, briefly
    # blocking the event loop; the install below is properly async.
    cmd_check = [sys.executable, '-m', 'pip', 'show', package_name]
    proc = subprocess.Popen(cmd_check,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    if out:
        # `pip show` printed metadata, so the package already exists.
        return {'description': f"{package_name} is already installed"}

    # install package if it's not installed
    cmd_install = [sys.executable, '-m', 'pip', 'install', package_name]
    process = await asyncio.create_subprocess_exec(
        *cmd_install,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)

    stdout, stderr = await process.communicate()

    if process.returncode != 0:
        # Surface the failure both in the chat UI and to the model.
        await cl.Message(content=f"Failed to install {package_name}.").send()
        return {
            'description':
            f"Error installing {package_name}: {stderr.decode()}"
        }
    await cl.Message(content=f"Successfully installed {package_name}.").send()
    return {'description': f"{package_name} has been successfully installed"}
plugins/vue/__pycache__/functions.cpython-39.pyc ADDED
Binary file (3.39 kB). View file
 
plugins/vue/config.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "enabled": true
3
+ }
plugins/vue/functions.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import subprocess
2
+
3
async def vue_install_package(path: str, package_name: str):
    """
    This function is used to install a package in the Vue project.
    Parameters:
    package_name : The name of the package.
    path : The path of the project.
    """
    # Run npm inside the project directory; failures are reported as a
    # status dict rather than raised to the caller.
    try:
        subprocess.run(["npm", "install", package_name], cwd=path)
        return {
            "status": "true",
            "description": "Package installed successfully.",
        }
    except Exception as err:
        return {"status": "false", "description": str(err)}
18
+
19
async def vue_create_directory(path: str, directory_name: str):
    """
    This function is used to create a directory in the Vue project.
    Parameters:
    path : The path of the project.
    directory_name : The name of the directory.
    """
    # Shell out to mkdir inside the project; report any error as a
    # status dict instead of raising.
    try:
        subprocess.run(["mkdir", directory_name], cwd=path)
        return {
            "status": "true",
            "description": "Directory created successfully.",
        }
    except Exception as err:
        return {"status": "false", "description": str(err)}
34
+
35
async def vue_create_file(path: str, file_name: str):
    """
    This function is used to create a file in the Vue project.
    Parameters:
    path : The path of the project.
    file_name : The name of the file.
    """
    # Shell out to touch inside the project; report any error as a
    # status dict instead of raising.
    try:
        subprocess.run(["touch", file_name], cwd=path)
        return {
            "status": "true",
            "description": "File created successfully.",
        }
    except Exception as err:
        return {"status": "false", "description": str(err)}
50
+
51
async def vue_get_project_file_list(path: str):
    """
    This function is used to get the file list of the Vue project.
    Parameters:
    project_name : The name of the project.
    path : The path of the project.
    """
    # List the directory with ls and hand back its stdout verbatim.
    try:
        listing = subprocess.run(["ls", path], capture_output=True)
        return {
            "status": "true",
            "description": listing.stdout.decode(),
        }
    except Exception as err:
        return {"status": "false", "description": str(err)}
67
+
68
+
69
async def get_vue_project_file_content(path: str, file_name: str):
    """
    This function is used to get the content of a file in the Vue project.
    Parameters:
    path : The path of the file you want to write to.
    file_name : The name of the file.
    """
    # Read the whole file as text; any failure (missing file, permission,
    # decode error) is reported as a status dict.
    try:
        with open(f"{path}/{file_name}", "r") as src:
            return {
                "status": "true",
                "description": src.read(),
            }
    except Exception as err:
        return {"status": "false", "description": str(err)}
85
+
86
async def write_vue_project_file_content(path: str, file_name: str, content: str):
    """
    This function is used to write content to a file in the Vue project.
    Parameters:
    path : The path of the file you want to write to.
    file_name : The name of the file.
    content : The content to write.
    """
    # Overwrite the target file with the given text; any failure is
    # reported as a status dict instead of raised.
    try:
        with open(f"{path}/{file_name}", "w") as dst:
            dst.write(content)
        return {
            "status": "true",
            "description": "File content written successfully.",
        }
    except Exception as err:
        return {"status": "false", "description": str(err)}
103
+
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ chainlit==0.5.2
2
+ loguru==0.5.3
3
+ tiktoken==0.4.0
4
+ prompt_toolkit==3.0.39
5
+ scikit-learn
6
+ pandas
7
+ matplotlib
8
+ numpy