merge jittor branch

Changed files:
- docs/Dockerfile+JittorLLM  +59 -0
- request_llm/bridge_all.py  +45 -0
- request_llm/bridge_jittorllms_llama.py  +178 -0
- request_llm/bridge_jittorllms_pangualpha.py  +178 -0
- request_llm/{bridge_jittorllms.py → bridge_jittorllms_rwkv.py}  +53 -28
- request_llm/requirements_jittorllms.txt  +4 -1
- request_llm/test_llms.py  +56 -5
docs/Dockerfile+JittorLLM
ADDED
@@ -0,0 +1,59 @@
+# How to build: docker build -t gpt-academic-jittor --network=host -f Dockerfile+JittorLLM .
+# How to run | (1) Run directly in one step (use GPU 0): docker run --rm -it --net=host --gpus "device=0" gpt-academic-jittor bash
+# How to run | (2) Enter the container to make adjustments before running (use GPU 1): docker run --rm -it --net=host --gpus "device=1" gpt-academic-jittor bash
+
+# Build from the NVIDIA base image so the GPU can be used (the CUDA version reported by the host's nvidia-smi must be >= 11.3)
+FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
+ARG useProxyNetwork=''
+RUN apt-get update
+RUN apt-get install -y curl proxychains g++
+RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing
+
+# Configure the proxy network (used while building the Docker image)
+# # comment out below if you do not need proxy network | if no proxy is needed, delete from this line downward
+RUN $useProxyNetwork curl cip.cc
+RUN sed -i '$ d' /etc/proxychains.conf
+RUN sed -i '$ d' /etc/proxychains.conf
+# Fill in the host's proxy protocol here (used to pull code from github)
+RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf
+ARG useProxyNetwork=proxychains
+# # comment out above if you do not need proxy network | if no proxy is needed, delete upward from this line
+
+
+# use python3 as the system default python
+RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8
+# Install pytorch
+RUN $useProxyNetwork python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
+# Clone the jittor branch
+WORKDIR /gpt
+RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b jittor
+WORKDIR /gpt/chatgpt_academic
+RUN $useProxyNetwork python3 -m pip install -r requirements.txt
+RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
+RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_newbing.txt
+RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I
+
+# Download JittorLLMs
+RUN $useProxyNetwork git clone https://github.com/binary-husky/JittorLLMs.git --depth 1 request_llm/jittorllms
+
+# Disable the build cache so the latest code is always pulled
+ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache
+RUN $useProxyNetwork git pull
+
+# Warm up the Tiktoken module
+RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
+
+# Configure the proxy and API-KEY for chatgpt-academic (optional step)
+# Multiple API keys may be provided at once; openai keys and api2d keys can coexist, separated by commas, e.g. API_KEY = "sk-openaikey1,fkxxxx-api2dkey2,........"
+# LLM_MODEL selects the initial model
+# LOCAL_MODEL_DEVICE selects the device on which local models such as chatglm run; either cpu or cuda
+# [Note: the entries below correspond one-to-one with config.py; consult config.py when filling them in]
+RUN echo ' \n\
+API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\
+USE_PROXY = True \n\
+LLM_MODEL = "chatglm" \n\
+LOCAL_MODEL_DEVICE = "cuda" \n\
+proxies = { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' >> config_private.py
+
+# Launch
+CMD ["python3", "-u", "main.py"]
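The final RUN echo step bakes a minimal config_private.py into the image. Below is a rough, hypothetical sketch of how such an override layer is typically resolved at runtime; it assumes, as the comments above state, that entries in config_private.py shadow the defaults in config.py, and the read_conf helper name is illustrative, not the project's actual API.

# Hypothetical sketch: resolve a key from config_private.py, falling back to config.py.
import importlib

def read_conf(key):
    # Prefer config_private.py when it exists and defines the key.
    try:
        private = importlib.import_module('config_private')
        if hasattr(private, key):
            return getattr(private, key)
    except ImportError:
        pass
    # Otherwise fall back to the defaults shipped in config.py.
    default = importlib.import_module('config')
    return getattr(default, key)

# Example: pick the device for local models such as chatglm / jittorllms.
device = read_conf('LOCAL_MODEL_DEVICE')   # "cuda" in the image built above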
request_llm/bridge_all.py
CHANGED
@@ -133,6 +133,51 @@ model_info = {
 }
 
 
+AVAIL_LLM_MODELS, = get_conf("AVAIL_LLM_MODELS")
+if "jittorllms_rwkv" in AVAIL_LLM_MODELS:
+    from .bridge_jittorllms_rwkv import predict_no_ui_long_connection as rwkv_noui
+    from .bridge_jittorllms_rwkv import predict as rwkv_ui
+    model_info.update({
+        "jittorllms_rwkv": {
+            "fn_with_ui": rwkv_ui,
+            "fn_without_ui": rwkv_noui,
+            "endpoint": None,
+            "max_token": 1024,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        },
+    })
+if "jittorllms_llama" in AVAIL_LLM_MODELS:
+    from .bridge_jittorllms_llama import predict_no_ui_long_connection as llama_noui
+    from .bridge_jittorllms_llama import predict as llama_ui
+    model_info.update({
+        "jittorllms_llama": {
+            "fn_with_ui": llama_ui,
+            "fn_without_ui": llama_noui,
+            "endpoint": None,
+            "max_token": 1024,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        },
+    })
+if "jittorllms_pangualpha" in AVAIL_LLM_MODELS:
+    from .bridge_jittorllms_pangualpha import predict_no_ui_long_connection as pangualpha_noui
+    from .bridge_jittorllms_pangualpha import predict as pangualpha_ui
+    model_info.update({
+        "jittorllms_pangualpha": {
+            "fn_with_ui": pangualpha_ui,
+            "fn_without_ui": pangualpha_noui,
+            "endpoint": None,
+            "max_token": 1024,
+            "tokenizer": tokenizer_gpt35,
+            "token_cnt": get_token_num_gpt35,
+        },
+    })
+
+
+
+
+
 def LLM_CATCH_EXCEPTION(f):
     """
     Decorator function that surfaces errors
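The hunk above registers the three jittorllms back-ends only when they appear in AVAIL_LLM_MODELS, so a backend that is not configured is never imported. A minimal sketch of the same registry-and-dispatch idea is shown below; the registry dict layout mirrors the hunk, but the stand-in predict function and the hard-coded AVAIL_LLM_MODELS list are illustrative, not the actual bridge_all.py API.

# Illustrative sketch of the conditional registry pattern used above.
# model_info maps a model name to its UI / non-UI callables plus metadata.
model_info = {}

def fake_predict(inputs, **kwargs):          # stand-in for a bridge's predict function
    return f"echo: {inputs}"

AVAIL_LLM_MODELS = ["jittorllms_rwkv"]       # normally read via get_conf("AVAIL_LLM_MODELS")

if "jittorllms_rwkv" in AVAIL_LLM_MODELS:    # registration happens only when configured
    model_info.update({
        "jittorllms_rwkv": {
            "fn_with_ui": fake_predict,
            "fn_without_ui": fake_predict,
            "endpoint": None,
            "max_token": 1024,
        },
    })

# Dispatch: look the model up by name and call the variant you need.
reply = model_info["jittorllms_rwkv"]["fn_without_ui"]("hello")
print(reply)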
request_llm/bridge_jittorllms_llama.py
ADDED
@@ -0,0 +1,178 @@
+
+from transformers import AutoModel, AutoTokenizer
+import time
+import threading
+import importlib
+from toolbox import update_ui, get_conf
+from multiprocessing import Process, Pipe
+
+load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
+
+#################################################################################
+class GetGLMHandle(Process):
+    def __init__(self):
+        super().__init__(daemon=True)
+        self.parent, self.child = Pipe()
+        self.jittorllms_model = None
+        self.info = ""
+        self.local_history = []
+        self.success = True
+        self.check_dependency()
+        self.start()
+        self.threadLock = threading.Lock()
+
+    def check_dependency(self):
+        try:
+            import pandas
+            self.info = "依赖检测通过"
+            self.success = True
+        except:
+            from toolbox import trimmed_format_exc
+            self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
+                        r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\
+                        r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" + trimmed_format_exc()
+            self.success = False
+
+    def ready(self):
+        return self.jittorllms_model is not None
+
+    def run(self):
+        # runs in the child process
+        # on the first run, load the model parameters
+        def validate_path():
+            import os, sys
+            dir_name = os.path.dirname(__file__)
+            env = os.environ.get("PATH", "")
+            os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin')
+            root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
+            os.chdir(root_dir_assume + '/request_llm/jittorllms')
+            sys.path.append(root_dir_assume + '/request_llm/jittorllms')
+        validate_path()  # validate path so you can run from base directory
+
+        def load_model():
+            import types
+            try:
+                if self.jittorllms_model is None:
+                    device, = get_conf('LOCAL_MODEL_DEVICE')
+                    from .jittorllms.models import get_model
+                    # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
+                    args_dict = {'model': 'llama'}
+                    print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
+                    self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
+                    print('done get model')
+            except:
+                self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。')
+                raise RuntimeError("不能正常加载jittorllms的参数!")
+        print('load_model')
+        load_model()
+
+        # enter the task-waiting loop
+        print('进入任务等待状态')
+        while True:
+            # wait for the next task
+            kwargs = self.child.recv()
+            query = kwargs['query']
+            history = kwargs['history']
+            # reset the model if the caller cleared the history
+            if len(self.local_history) > 0 and len(history)==0:
+                print('触发重置')
+                self.jittorllms_model.reset()
+            self.local_history.append(query)
+
+            print('收到消息,开始请求')
+            try:
+                for response in self.jittorllms_model.stream_chat(query, history):
+                    print(response)
+                    self.child.send(response)
+            except:
+                from toolbox import trimmed_format_exc
+                print(trimmed_format_exc())
+                self.child.send('[Local Message] Call jittorllms fail.')
+            # request finished, start the next loop
+            self.child.send('[Finish]')
+
+    def stream_chat(self, **kwargs):
+        # runs in the main process
+        self.threadLock.acquire()
+        self.parent.send(kwargs)
+        while True:
+            res = self.parent.recv()
+            if res != '[Finish]':
+                yield res
+            else:
+                break
+        self.threadLock.release()
+
+global llama_glm_handle
+llama_glm_handle = None
+#################################################################################
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+    """
+    Multi-threaded method
+    See request_llm/bridge_all.py for the documentation of this function
+    """
+    global llama_glm_handle
+    if llama_glm_handle is None:
+        llama_glm_handle = GetGLMHandle()
+        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + llama_glm_handle.info
+        if not llama_glm_handle.success:
+            error = llama_glm_handle.info
+            llama_glm_handle = None
+            raise RuntimeError(error)
+
+    # jittorllms has no sys_prompt interface, so the prompt is folded into the history
+    history_feedin = []
+    for i in range(len(history)//2):
+        history_feedin.append([history[2*i], history[2*i+1]] )
+
+    watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
+    response = ""
+    for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        print(response)
+        if len(observe_window) >= 1: observe_window[0] = response
+        if len(observe_window) >= 2:
+            if (time.time()-observe_window[1]) > watch_dog_patience:
+                raise RuntimeError("程序终止。")
+    return response
+
+
+
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
+    """
+    Single-threaded method
+    See request_llm/bridge_all.py for the documentation of this function
+    """
+    chatbot.append((inputs, ""))
+
+    global llama_glm_handle
+    if llama_glm_handle is None:
+        llama_glm_handle = GetGLMHandle()
+        chatbot[-1] = (inputs, load_message + "\n\n" + llama_glm_handle.info)
+        yield from update_ui(chatbot=chatbot, history=[])
+        if not llama_glm_handle.success:
+            llama_glm_handle = None
+            return
+
+    if additional_fn is not None:
+        import core_functional
+        importlib.reload(core_functional)    # hot-reload the prompt
+        core_functional = core_functional.get_core_functions()
+        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # apply the pre-processing function, if there is one
+        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
+
+    # process the history
+    history_feedin = []
+    for i in range(len(history)//2):
+        history_feedin.append([history[2*i], history[2*i+1]] )
+
+    # start receiving the jittorllms reply
+    response = "[Local Message]: 等待jittorllms响应中 ..."
+    for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        chatbot[-1] = (inputs, response)
+        yield from update_ui(chatbot=chatbot, history=history)
+
+    # wrap up the output
+    if response == "[Local Message]: 等待jittorllms响应中 ...":
+        response = "[Local Message]: jittorllms响应异常 ..."
+    history.extend([inputs, response])
+    yield from update_ui(chatbot=chatbot, history=history)
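bridge_jittorllms_llama.py keeps the model in a daemon subprocess and streams partial replies back over a multiprocessing Pipe, using '[Finish]' as an end-of-stream sentinel. The sketch below isolates just that parent/child streaming pattern; StreamingWorker and its word-splitting loop are a toy stand-in for the real jittorllms model, not project code.

# Minimal sketch of the Process + Pipe streaming pattern used by GetGLMHandle.
from multiprocessing import Process, Pipe

class StreamingWorker(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()

    def run(self):
        # Child process: wait for a request, stream chunks back, then send the sentinel.
        while True:
            kwargs = self.child.recv()
            for token in kwargs['query'].split():   # toy stand-in for model.stream_chat(...)
                self.child.send(token)
            self.child.send('[Finish]')

    def stream_chat(self, **kwargs):
        # Main process: send the request and yield chunks until the sentinel arrives.
        self.parent.send(kwargs)
        while True:
            res = self.parent.recv()
            if res == '[Finish]':
                break
            yield res

if __name__ == '__main__':
    worker = StreamingWorker()
    worker.start()
    for chunk in worker.stream_chat(query="stream me word by word", history=[]):
        print(chunk)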
request_llm/bridge_jittorllms_pangualpha.py
ADDED
@@ -0,0 +1,178 @@
+
+from transformers import AutoModel, AutoTokenizer
+import time
+import threading
+import importlib
+from toolbox import update_ui, get_conf
+from multiprocessing import Process, Pipe
+
+load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
+
+#################################################################################
+class GetGLMHandle(Process):
+    def __init__(self):
+        super().__init__(daemon=True)
+        self.parent, self.child = Pipe()
+        self.jittorllms_model = None
+        self.info = ""
+        self.local_history = []
+        self.success = True
+        self.check_dependency()
+        self.start()
+        self.threadLock = threading.Lock()
+
+    def check_dependency(self):
+        try:
+            import pandas
+            self.info = "依赖检测通过"
+            self.success = True
+        except:
+            from toolbox import trimmed_format_exc
+            self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
+                        r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\
+                        r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" + trimmed_format_exc()
+            self.success = False
+
+    def ready(self):
+        return self.jittorllms_model is not None
+
+    def run(self):
+        # runs in the child process
+        # on the first run, load the model parameters
+        def validate_path():
+            import os, sys
+            dir_name = os.path.dirname(__file__)
+            env = os.environ.get("PATH", "")
+            os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin')
+            root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
+            os.chdir(root_dir_assume + '/request_llm/jittorllms')
+            sys.path.append(root_dir_assume + '/request_llm/jittorllms')
+        validate_path()  # validate path so you can run from base directory
+
+        def load_model():
+            import types
+            try:
+                if self.jittorllms_model is None:
+                    device, = get_conf('LOCAL_MODEL_DEVICE')
+                    from .jittorllms.models import get_model
+                    # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
+                    args_dict = {'model': 'pangualpha'}
+                    print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
+                    self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
+                    print('done get model')
+            except:
+                self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。')
+                raise RuntimeError("不能正常加载jittorllms的参数!")
+        print('load_model')
+        load_model()
+
+        # enter the task-waiting loop
+        print('进入任务等待状态')
+        while True:
+            # wait for the next task
+            kwargs = self.child.recv()
+            query = kwargs['query']
+            history = kwargs['history']
+            # reset the model if the caller cleared the history
+            if len(self.local_history) > 0 and len(history)==0:
+                print('触发重置')
+                self.jittorllms_model.reset()
+            self.local_history.append(query)
+
+            print('收到消息,开始请求')
+            try:
+                for response in self.jittorllms_model.stream_chat(query, history):
+                    print(response)
+                    self.child.send(response)
+            except:
+                from toolbox import trimmed_format_exc
+                print(trimmed_format_exc())
+                self.child.send('[Local Message] Call jittorllms fail.')
+            # request finished, start the next loop
+            self.child.send('[Finish]')
+
+    def stream_chat(self, **kwargs):
+        # runs in the main process
+        self.threadLock.acquire()
+        self.parent.send(kwargs)
+        while True:
+            res = self.parent.recv()
+            if res != '[Finish]':
+                yield res
+            else:
+                break
+        self.threadLock.release()
+
+global pangu_glm_handle
+pangu_glm_handle = None
+#################################################################################
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
+    """
+    Multi-threaded method
+    See request_llm/bridge_all.py for the documentation of this function
+    """
+    global pangu_glm_handle
+    if pangu_glm_handle is None:
+        pangu_glm_handle = GetGLMHandle()
+        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + pangu_glm_handle.info
+        if not pangu_glm_handle.success:
+            error = pangu_glm_handle.info
+            pangu_glm_handle = None
+            raise RuntimeError(error)
+
+    # jittorllms has no sys_prompt interface, so the prompt is folded into the history
+    history_feedin = []
+    for i in range(len(history)//2):
+        history_feedin.append([history[2*i], history[2*i+1]] )
+
+    watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
+    response = ""
+    for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        print(response)
+        if len(observe_window) >= 1: observe_window[0] = response
+        if len(observe_window) >= 2:
+            if (time.time()-observe_window[1]) > watch_dog_patience:
+                raise RuntimeError("程序终止。")
+    return response
+
+
+
+def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
+    """
+    Single-threaded method
+    See request_llm/bridge_all.py for the documentation of this function
+    """
+    chatbot.append((inputs, ""))
+
+    global pangu_glm_handle
+    if pangu_glm_handle is None:
+        pangu_glm_handle = GetGLMHandle()
+        chatbot[-1] = (inputs, load_message + "\n\n" + pangu_glm_handle.info)
+        yield from update_ui(chatbot=chatbot, history=[])
+        if not pangu_glm_handle.success:
+            pangu_glm_handle = None
+            return
+
+    if additional_fn is not None:
+        import core_functional
+        importlib.reload(core_functional)    # hot-reload the prompt
+        core_functional = core_functional.get_core_functions()
+        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # apply the pre-processing function, if there is one
+        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
+
+    # process the history
+    history_feedin = []
+    for i in range(len(history)//2):
+        history_feedin.append([history[2*i], history[2*i+1]] )
+
+    # start receiving the jittorllms reply
+    response = "[Local Message]: 等待jittorllms响应中 ..."
+    for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        chatbot[-1] = (inputs, response)
+        yield from update_ui(chatbot=chatbot, history=history)
+
+    # wrap up the output
+    if response == "[Local Message]: 等待jittorllms响应中 ...":
+        response = "[Local Message]: jittorllms响应异常 ..."
+    history.extend([inputs, response])
+    yield from update_ui(chatbot=chatbot, history=history)
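predict_no_ui_long_connection in both bridge files above guards the streaming loop with a watchdog: observe_window[0] carries the latest partial reply out to the caller, observe_window[1] carries the caller's last heartbeat timestamp, and the loop aborts once that heartbeat is older than watch_dog_patience seconds. The self-contained sketch below reproduces only that check; dummy_stream and run_with_watchdog are illustrative stand-ins, not project functions.

# Sketch of the observe_window watchdog used in predict_no_ui_long_connection.
import time

def dummy_stream():                 # stand-in for handle.stream_chat(...)
    for i in range(10):
        time.sleep(1)
        yield f"partial reply {i}"

def run_with_watchdog(observe_window, watch_dog_patience=5):
    response = ""
    for response in dummy_stream():
        if len(observe_window) >= 1:
            observe_window[0] = response            # expose progress to the caller
        if len(observe_window) >= 2:
            if (time.time() - observe_window[1]) > watch_dog_patience:
                raise RuntimeError("watchdog timeout: caller stopped refreshing the heartbeat")
    return response

observe_window = ["", time.time()]   # a live caller would keep refreshing observe_window[1]
try:
    print(run_with_watchdog(observe_window))
except RuntimeError as e:
    print("stopped by watchdog:", e)  # triggers here because nothing refreshes the heartbeat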
request_llm/{bridge_jittorllms.py → bridge_jittorllms_rwkv.py}
RENAMED
@@ -6,7 +6,7 @@ import importlib
 from toolbox import update_ui, get_conf
 from multiprocessing import Process, Pipe
 
+load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
 
 #################################################################################
 class GetGLMHandle(Process):
@@ -15,6 +15,7 @@ class GetGLMHandle(Process):
         self.parent, self.child = Pipe()
         self.jittorllms_model = None
         self.info = ""
+        self.local_history = []
         self.success = True
         self.check_dependency()
         self.start()
@@ -22,13 +23,14 @@ class GetGLMHandle(Process):
 
     def check_dependency(self):
         try:
-            from .jittorllms.models import get_model
+            import pandas
             self.info = "依赖检测通过"
             self.success = True
         except:
+            from toolbox import trimmed_format_exc
+            self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\
+                        r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\
+                        r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" + trimmed_format_exc()
             self.success = False
 
     def ready(self):
@@ -37,6 +39,16 @@ class GetGLMHandle(Process):
     def run(self):
         # runs in the child process
         # on the first run, load the model parameters
+        def validate_path():
+            import os, sys
+            dir_name = os.path.dirname(__file__)
+            env = os.environ.get("PATH", "")
+            os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin')
+            root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
+            os.chdir(root_dir_assume + '/request_llm/jittorllms')
+            sys.path.append(root_dir_assume + '/request_llm/jittorllms')
+        validate_path()  # validate path so you can run from base directory
+
         def load_model():
             import types
             try:
@@ -44,23 +56,37 @@ class GetGLMHandle(Process):
                     device, = get_conf('LOCAL_MODEL_DEVICE')
                     from .jittorllms.models import get_model
                     # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
+                    args_dict = {'model': 'chatrwkv'}
+                    print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
                     self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
+                    print('done get model')
             except:
                 self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。')
                 raise RuntimeError("不能正常加载jittorllms的参数!")
+        print('load_model')
         load_model()
 
         # enter the task-waiting loop
+        print('进入任务等待状态')
         while True:
             # wait for the next task
             kwargs = self.child.recv()
+            query = kwargs['query']
+            history = kwargs['history']
+            # reset the model if the caller cleared the history
+            if len(self.local_history) > 0 and len(history)==0:
+                print('触发重置')
+                self.jittorllms_model.reset()
+            self.local_history.append(query)
+
+            print('收到消息,开始请求')
             try:
+                for response in self.jittorllms_model.stream_chat(query, history):
+                    print(response)
                     self.child.send(response)
             except:
+                from toolbox import trimmed_format_exc
+                print(trimmed_format_exc())
                 self.child.send('[Local Message] Call jittorllms fail.')
             # request finished, start the next loop
             self.child.send('[Finish]')
@@ -77,32 +103,32 @@ class GetGLMHandle(Process):
                 break
         self.threadLock.release()
 
+global rwkv_glm_handle
+rwkv_glm_handle = None
 #################################################################################
 def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
     """
     Multi-threaded method
     See request_llm/bridge_all.py for the documentation of this function
     """
+    global rwkv_glm_handle
+    if rwkv_glm_handle is None:
+        rwkv_glm_handle = GetGLMHandle()
+        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + rwkv_glm_handle.info
+        if not rwkv_glm_handle.success:
+            error = rwkv_glm_handle.info
+            rwkv_glm_handle = None
            raise RuntimeError(error)
 
     # jittorllms has no sys_prompt interface, so the prompt is folded into the history
     history_feedin = []
-    history_feedin.append(["What can I do?", sys_prompt])
     for i in range(len(history)//2):
         history_feedin.append([history[2*i], history[2*i+1]] )
 
     watch_dog_patience = 5  # watchdog patience; 5 seconds is enough
     response = ""
+    for response in rwkv_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
+        print(response)
         if len(observe_window) >= 1: observe_window[0] = response
         if len(observe_window) >= 2:
             if (time.time()-observe_window[1]) > watch_dog_patience:
@@ -118,13 +144,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
     """
     chatbot.append((inputs, ""))
 
+    global rwkv_glm_handle
+    if rwkv_glm_handle is None:
+        rwkv_glm_handle = GetGLMHandle()
+        chatbot[-1] = (inputs, load_message + "\n\n" + rwkv_glm_handle.info)
         yield from update_ui(chatbot=chatbot, history=[])
+        if not rwkv_glm_handle.success:
+            rwkv_glm_handle = None
             return
 
     if additional_fn is not None:
@@ -136,13 +162,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
 
     # process the history
     history_feedin = []
-    history_feedin.append(["What can I do?", system_prompt] )
     for i in range(len(history)//2):
         history_feedin.append([history[2*i], history[2*i+1]] )
 
     # start receiving the jittorllms reply
     response = "[Local Message]: 等待jittorllms响应中 ..."
+    for response in rwkv_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
 
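One behavioural change in the renamed rwkv bridge is that the system prompt is no longer injected as a synthetic ["What can I do?", sys_prompt] turn; only real user/assistant pairs from the flat history list are fed back in. The small sketch below shows that pairing step on its own; the build_history_feedin name is illustrative, the loop body matches the code above.

# Sketch: fold a flat [user, reply, user, reply, ...] history into jittorllms-style pairs.
def build_history_feedin(history):
    history_feedin = []
    for i in range(len(history) // 2):
        history_feedin.append([history[2 * i], history[2 * i + 1]])
    return history_feedin

history = ["hello", "hi there", "what is jittor?", "a deep learning framework"]
print(build_history_feedin(history))
# [['hello', 'hi there'], ['what is jittor?', 'a deep learning framework']]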
request_llm/requirements_jittorllms.txt
CHANGED
@@ -1,4 +1,7 @@
 jittor >= 1.3.7.9
 jtorch >= 0.1.3
 torch
-torchvision
+torchvision
+transformers==4.26.1
+pandas
+jieba
request_llm/test_llms.py
CHANGED
@@ -1,6 +1,6 @@
-"""
-Unit-test each of the llm models
-"""
+# """
+# Unit-test each of the llm models
+# """
 def validate_path():
     import os, sys
     dir_name = os.path.dirname(__file__)
@@ -10,7 +10,9 @@ def validate_path():
 
 validate_path()  # validate path so you can run from base directory
 
-from request_llm.
+from request_llm.bridge_jittorllms_rwkv import predict_no_ui_long_connection
+# from request_llm.bridge_jittorllms_pangualpha import predict_no_ui_long_connection
+# from request_llm.bridge_jittorllms_llama import predict_no_ui_long_connection
 
 llm_kwargs = {
     'max_length': 512,
@@ -22,5 +24,54 @@ result = predict_no_ui_long_connection(inputs="你好",
                                        llm_kwargs=llm_kwargs,
                                        history=[],
                                        sys_prompt="")
+print('final result:', result)
 
+
+result = predict_no_ui_long_connection(inputs="what is a hero?",
+                                       llm_kwargs=llm_kwargs,
+                                       history=["hello world"],
+                                       sys_prompt="")
+print('final result:', result)
+
+result = predict_no_ui_long_connection(inputs="如何理解传奇?",
+                                       llm_kwargs=llm_kwargs,
+                                       history=[],
+                                       sys_prompt="")
+print('final result:', result)
+
+# # print(result)
+# from multiprocessing import Process, Pipe
+# class GetGLMHandle(Process):
+#     def __init__(self):
+#         super().__init__(daemon=True)
+#         pass
+#     def run(self):
+#         # runs in the child process
+#         # on the first run, load the model parameters
+#         def validate_path():
+#             import os, sys
+#             dir_name = os.path.dirname(__file__)
+#             root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
+#             os.chdir(root_dir_assume + '/request_llm/jittorllms')
+#             sys.path.append(root_dir_assume + '/request_llm/jittorllms')
+#         validate_path()  # validate path so you can run from base directory
+
+#         jittorllms_model = None
+#         import types
+#         try:
+#             if jittorllms_model is None:
+#                 from models import get_model
+#                 # available_models = ["chatglm", "pangualpha", "llama", "chatrwkv"]
+#                 args_dict = {'model': 'chatrwkv'}
+#                 print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
+#                 jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
+#                 print('done get model')
+#         except:
+#             # self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。')
+#             raise RuntimeError("不能正常加载jittorllms的参数!")
+
+# x = GetGLMHandle()
+# x.start()
+
+
+# input()