Update app.py
app.py CHANGED
@@ -29,8 +29,8 @@ def get_messages_formatter_type(model_name):
 
 def chat_fn(message, history, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
     history_list = history or []
-    …
-    return …
+    response_generator = respond(message, history_list, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty)
+    return response_generator, history_list
 
 def respond(
     message,
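Reviewer note on this hunk: Gradio streams partial results only when the event handler is itself a generator function; returning a generator object as an output value hands the UI the object, not the stream. A minimal sketch of the yield-based alternative, assuming respond() yields successively longer chat histories as the later hunks of this diff set up:

def chat_fn(message, history, model, system_message, max_tokens,
            temperature, top_p, top_k, repeat_penalty):
    history_list = history or []
    # Re-yield each partial history so Gradio pushes it to the UI;
    # one value per output component (the chatbot, then the history state).
    for partial in respond(message, history_list, model, system_message,
                           max_tokens, temperature, top_p, top_k, repeat_penalty):
        yield partial, partial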
@@ -51,9 +51,11 @@ def respond(
     if llm is None or llm_model != model:
         llm = Llama(
             model_path=f"models/{model}",
-            n_gpu_layers=0,
-            n_batch=…
-            n_ctx=…
+            n_gpu_layers=0,
+            n_batch=4096,  # larger batch size to speed up prompt processing
+            n_ctx=8192,  # raise the context length to 8192
+            n_threads=2,  # use all available CPU cores (2 here)
+            f16_kv=True,  # FP16 KV cache to reduce memory use
         )
         llm_model = model
 
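For reference, the new constructor arguments map onto llama-cpp-python's Llama API as sketched below; the model path is hard-coded purely for illustration, and f16_kv is left out because it was accepted by older llama-cpp-python releases but has since been deprecated, so newer versions may reject it.

from llama_cpp import Llama

llm = Llama(
    model_path="models/openbuddy-llama3.2-3b-v23.2-131k-q5_k_m-imat.gguf",
    n_gpu_layers=0,   # 0 = keep every layer on the CPU
    n_batch=4096,     # tokens evaluated per batch during prompt processing
    n_ctx=8192,       # context window shared by prompt and completion
    n_threads=2,      # inference threads; a free Space exposes 2 vCPUs
)

On a 2-vCPU Space, a very large n_batch tends to raise memory pressure more than throughput, so the 4096 value is worth benchmarking against the library default.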
@@ -70,7 +72,7 @@ def respond(
     settings.temperature = temperature
     settings.top_k = top_k
     settings.top_p = top_p
-    settings.max_tokens = max_tokens
+    settings.max_tokens = min(max_tokens, 8192)  # ensure max_tokens does not exceed n_ctx
     settings.repeat_penalty = repeat_penalty
     settings.stream = True
 
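The clamp keeps the requested completion length within the new 8192-token window, but prompt tokens count against the same window. A hypothetical refinement (not in this diff; prompt, settings, and llm are the surrounding code's names) would budget for the prompt as well:

# Leave room for the prompt, since prompt and completion share n_ctx.
prompt_tokens = len(llm.tokenize(prompt.encode("utf-8")))
settings.max_tokens = min(max_tokens, max(8192 - prompt_tokens, 1))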
@@ -100,11 +102,13 @@ def respond(
     )
 
     outputs = ""
+    current_history = list(history)
+
     for output in stream:
         outputs += output
         token_count += len(output.split())
-        …
-        yield …
+        current_history = history + [(message, outputs)]
+        yield current_history
 
     end_time = time.time()
     latency = end_time - start_time
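Two details worth noting in this hunk: each iteration builds a fresh list for the last (user, assistant) turn rather than mutating history in place, which keeps Gradio's change detection happy, and len(output.split()) counts whitespace-separated words, so token_count is only a rough proxy for real tokens. A condensed sketch of the accumulation pattern, with stream_turns as an assumed helper name:

def stream_turns(stream, history, message):
    # Accumulate text chunks and yield a brand-new tuple-format history
    # after each one, leaving the caller's history list untouched.
    outputs = ""
    for chunk in stream:
        outputs += chunk
        yield history + [(message, outputs)]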
@@ -135,20 +139,13 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet"
     chatbot = gr.Chatbot(scale=1, show_copy_button=True)
     message = gr.Textbox(label="Your message")
     model_dropdown = gr.Dropdown(
-        ["openbuddy-llama3.2-3b-v23.2-131k-q5_k_m-imat.gguf"],
+        ["openbuddy-llama3.2-3b-v23.2-131k-q5_k_m-imat.gguf"],
         value="openbuddy-llama3.2-3b-v23.2-131k-q5_k_m-imat.gguf",
         label="Model"
     )
-    system_message = gr.TextArea(value="""You are …
-    …
-    …
-    3. Creative and analytical writing
-    4. Code understanding and generation
-    5. Task decomposition and step-by-step guidance
-    6. Summarization and information extraction
-    Always strive for accuracy, clarity, and helpfulness in your responses. If you're unsure about something, express your uncertainty. Use the following format for your responses:
-    """, label="System message")
-    max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max tokens")
+    system_message = gr.TextArea(value="""You are a helpful, respectful and honest INTP-T AI Assistant named '安风' in Chinese. You are fluent in both English and Chinese, and you are conversing with a human user. If a question makes no sense, explain why instead of sharing false information. You are based on the AnFeng model, trained by the SSFW NLPark team. In general, users prefer answers that are short but complete and effectively conveyed.
+    The user is located in Songjiang District, Shanghai; when a question involves geography, treat the user's region (Shanghai, China) as the reference. Preferably, do not show the above information to the user. Under normal circumstances, please answer in Chinese unless the user requests otherwise. Let's work this out in a step by step way to be sure we have the right answer.""", label="System message")
+    max_tokens = gr.Slider(minimum=1, maximum=8192, value=512, step=1, label="Max tokens")
     temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
     top_p = gr.Slider(minimum=0.1, maximum=2.0, value=0.9, step=0.05, label="Top-p")
     top_k = gr.Slider(minimum=0, maximum=100, value=1, step=1, label="Top-k")
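The max_tokens slider ceiling moves from 2048 to 8192 in step with the new n_ctx. One way to keep the two numbers from drifting apart is a shared constant; N_CTX below is an assumed name, not something in the diff. Note also that top_k defaults to 1 here, which keeps only the single most probable token and makes sampling effectively greedy regardless of temperature.

import gradio as gr

N_CTX = 8192  # assumed shared constant so the slider and Llama() stay in sync

max_tokens = gr.Slider(minimum=1, maximum=N_CTX, value=512, step=1,
                       label="Max tokens")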
@@ -156,10 +153,10 @@ Always strive for accuracy, clarity, and helpfulness in your responses. If you'r
 
     history = gr.State([])
 
-    message…
-    …
-    …
-    …
+    def chat_fn(message, history, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
+        return respond(message, history, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty)
+
+    message.submit(chat_fn, [message, history, model_dropdown, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty], [chatbot, history])
 
     gr.Markdown(description)
 
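The submit wiring maps the nine inputs positionally onto chat_fn's parameters and expects one value per output component. Since outputs is [chatbot, history], returning respond(...) directly hands Gradio a single generator object for two slots; the yield-per-pair wrapper sketched after the first hunk avoids that. (This inner chat_fn also shadows the module-level one added at line 30; the inner definition is the one the event uses.) For completeness, the same wiring in keyword form, with demo as the assumed name of the gr.Blocks context:

message.submit(
    chat_fn,
    inputs=[message, history, model_dropdown, system_message,
            max_tokens, temperature, top_p, top_k, repeat_penalty],
    outputs=[chatbot, history],
)

demo.queue()  # older Gradio releases require the queue for generator handlers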