Update app.py
app.py CHANGED

@@ -78,20 +78,18 @@ css = """
 .message { font-size: 1.2em; }
 """

-with gr.Blocks(css=css) as demo:
-
-
-    gr.Markdown("""# <p align="center">Sydney-AI <p> <b>""")
+with gr.Blocks(css=css) as demo:
+
     state = gr.State(get_empty_state())


     with gr.Column(elem_id="col-container"):
         gr.Markdown("""
-#
-
-
-
-
+# Sydney-AI <b>
+<p align="left"> This app is an intelligent online chat app developed based on the newly released OpenAI API "gpt-3.5-turbo". The app's operating costs are sponsored by "45度科研人". Currently, the tokens is limited to 3000. If you want to remove this restriction, you can input your own OpenAI API key.<p>
+<p align="left"> The default model role of the app is the original assistant of ChatGPT, but you can also choose from the provided roles. <p>
+<p align="left"> Two adjustable parameters are provided to optimize the model: temperature, where a larger value leads to more creative replies, and max tokens, where a larger value allows the model to reply with more content. <p>
+APP link: https://junchuanyu-sydney-ai.hf.space""",elem_id="header")

         with gr.Row():
             with gr.Column():
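The header text added in the hunk above mentions the "gpt-3.5-turbo" model, an optional user-supplied OpenAI API key, and the two tunable parameters (temperature and max tokens). The request code itself is not part of this diff, so the following is only a hedged sketch of how those values typically reach the model with the openai-python 0.x client that was current when this commit was made; the key string and message are placeholders, not values from the app.

```python
# Hedged sketch, not code from this commit: how the two slider values and an
# optional user-supplied key are typically passed to a "gpt-3.5-turbo" request
# with the openai-python 0.x client.
import openai

openai.api_key = "sk-..."  # placeholder; the app can take this from the user_token box

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, Sydney-AI!"}],
    temperature=0.7,   # higher -> more creative replies, per the app description
    max_tokens=1000,   # higher -> longer replies, per the app description
)
print(response["choices"][0]["message"]["content"])
```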
@@ -110,22 +108,17 @@ with gr.Blocks(css=css) as demo:
                 max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, interactive=True, label="Max tokens per response")
         # gr.Markdown("",elem_id="header")

-        # gr.Markdown("""
-        # <div align=center>Sydney-AI正在升级改版,优化输出显示和promp问题,新版近期上线,请关注[45度科研人]公众号信息!</div>
-        # <div align=center><img width = '200' height ='200' src ="https://dunazo.oss-cn-beijing.aliyuncs.com/blog/wechat-simple.png"/></div>""", elem_id="header")
-
         gr.Markdown("""
 you can follow the WeChat public account [45度科研人] and leave me a message!
 <div align=center><img width = '200' height ='200' src ="https://dunazo.oss-cn-beijing.aliyuncs.com/blog/wechat-simple.png"/></div>""", elem_id="header")
-
     btn_submit.click(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state])
     input_message.submit(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state])
     btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state])
     prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
     user_token.change(on_token_change, inputs=[user_token], outputs=[])

-
+
     demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template])


-demo.launch(debug=
+demo.launch(debug=True, height='800px')
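The event wiring kept in the hunk above follows Gradio's standard Blocks pattern: component.event(handler, [input components], [output components]), with the handler's return values mapped to the output components in order. Below is a minimal, self-contained sketch of that pattern, assuming Gradio 3.x (contemporary with this commit); the echo handler and the simplified component set stand in for the app's real submit_message, clear_conversation, and token-handling code, none of which appear in this diff.

```python
# Minimal sketch of the Blocks/event-wiring pattern used above (Gradio 3.x assumed).
import gradio as gr

def echo(message, temperature, max_tokens, history):
    # Placeholder handler: the real app calls the OpenAI API here.
    reply = f"(temperature={temperature}, max_tokens={max_tokens}) you said: {message}"
    history = history + [(message, reply)]
    return "", history, history  # clear the textbox, update the chatbot and the state

with gr.Blocks() as demo:
    gr.Markdown("# Wiring sketch")
    state = gr.State([])  # plays the role of gr.State(get_empty_state())
    chatbot = gr.Chatbot()
    input_message = gr.Textbox(label="Message")
    temperature = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, label="Temperature")
    max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, label="Max tokens per response")
    btn_submit = gr.Button("Submit")

    # Same pattern as the diff: event(handler, [input components], [output components]).
    btn_submit.click(echo, [input_message, temperature, max_tokens, state], [input_message, chatbot, state])
    input_message.submit(echo, [input_message, temperature, max_tokens, state], [input_message, chatbot, state])

demo.launch(debug=True, height='800px')
```

Because outputs are matched positionally, a handler wired like submit_message above is expected to return one value per listed output component (here four: input_message, chatbot, total_tokens_str, state).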