lmt committed · Commit 4453718 · Parent(s): eea4e9d

Increase timeout (增加超时时间)

Browse files
- app.py +1 -1
- presets.py +2 -1
app.py CHANGED
@@ -73,7 +73,7 @@ with gr.Blocks(css=customCSS) as server:
         historyRefreshBtn = gr.Button("🔄 刷新")
         historyReadBtn = gr.Button("📂 读入对话")
         # inputs, top_p, temperature, top_k, repetition_penalty
-        with gr.Accordion("参数", open=False):
+        with gr.Accordion("参数", open=False, visible=False):
             top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05,
                               interactive=True, label="Top-p (nucleus sampling)",)
             temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0,
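The one-line change above constructs the settings Accordion with visible=False, which removes the sampling panel from the rendered UI. A minimal sketch of the behavior this relies on, assuming standard Gradio 3.x Blocks semantics and not the app's actual layout: components declared inside a hidden container are still created with their default values, so event handlers that read top_p or temperature keep working.

# Minimal sketch (assumed Gradio 3.x Blocks API; not the app's actual layout):
# an Accordion built with visible=False is not rendered, but the components
# declared inside it still exist with their default values.
import gradio as gr

with gr.Blocks() as demo:
    with gr.Accordion("参数", open=False, visible=False):  # hidden settings panel
        top_p = gr.Slider(minimum=0, maximum=1.0, value=1.0, step=0.05,
                          label="Top-p (nucleus sampling)")
        temperature = gr.Slider(minimum=0, maximum=5.0, value=1.0,
                                label="Temperature")

    # The hidden sliders can still feed events; they supply their default values.
    shown = gr.Textbox(label="Effective sampling parameters")
    check = gr.Button("Show current values")
    check.click(lambda p, t: f"top_p={p}, temperature={t}",
                inputs=[top_p, temperature], outputs=shown)

demo.launch()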
presets.py CHANGED
@@ -1,6 +1,7 @@
 # -*- coding:utf-8 -*-
 title = """<h1 align="center">MyChatGPT</h1>"""
 description = """<div align=center>
+
 Powered by `gpt-3.5-turbo` Model
 </div>
 """
@@ -36,7 +37,7 @@ standard_error_msg = "☹️发生了错误:" # 错误信息的标准前缀
 error_retrieve_prompt = "连接超时,无法获取对话。请检查网络连接,或者API-Key是否有效。" # 获取对话时发生错误
 summarize_prompt = "请总结以上对话,不超过100字。" # 总结对话时的 prompt
 max_token_streaming = 3500 # 流式对话时的最大 token 数
-timeout_streaming =
+timeout_streaming = 30 # 流式对话时的超时时间
 max_token_all = 3500 # 非流式对话时的最大 token 数
 timeout_all = 200 # 非流式对话时的超时时间
 enable_streaming_option = False # 是否启用选择选择是否实时显示回答的勾选框
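presets.py only defines the constant; the code that consumes timeout_streaming is not part of this diff. A hypothetical sketch of how a 30-second streaming timeout is typically applied, assuming the request is made with the requests library (the function name, payload, and endpoint here are illustrative, not taken from the repository):

# Hypothetical usage sketch -- the actual consumer of timeout_streaming is not
# shown in this commit. Illustrates how a streaming chat request would pass the
# 30-second timeout to the HTTP layer.
import requests

from presets import timeout_streaming  # 30 seconds after this commit

def stream_chat(payload, headers):
    # timeout covers connecting and each read; a stalled stream raises
    # requests.exceptions.Timeout instead of hanging indefinitely.
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",  # assumed endpoint
        json=payload,
        headers=headers,
        stream=True,
        timeout=timeout_streaming,
    )
    for line in response.iter_lines():
        if line:
            yield line.decode("utf-8")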