kevinwang676 committed
Commit bacca9b
1 Parent(s): dcef39a

Update app.py

Files changed (1)
  1. app.py +9 -11
app.py CHANGED
@@ -305,24 +305,22 @@ async def text_to_speech_edge(text, language_code):
 
 
 with gr.Blocks(title="ChatGLM2-6B-int4", theme=gr.themes.Soft(text_size="sm")) as demo:
-    # gr.HTML("""<h1 align="center">ChatGLM2-6B-int4</h1>""")
+    gr.HTML("<center>"
+            "<h1>🥳💕🎶 - ChatGLM2 + 声音克隆:和你喜欢的角色畅所欲言吧!</h1>"
+            "</center>")
+    gr.Markdown("### <center>💡 - 第二代ChatGLm大语言模型 + FreeVC变声,为您打造独一无二的沉浸式对话体验</center>")
+    gr.Markdown("### <center>🌊 - 更多精彩应用,敬请关注[滔滔AI](http://www.talktalkai.com);滔滔AI,为爱滔滔!💕</center>")
+
     gr.HTML(
-        """<center><a href="https://huggingface.co/spaces/mikeee/chatglm2-6b-4bit?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>To avoid the queue and for faster inference Duplicate this Space and upgrade to GPU</center>"""
+        """<center><a href="https://huggingface.co/spaces/kevinwang676/FreeVC?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>点击此按钮即可复制该程序;切换到GPU环境后,就可以更快运行GLM2</center>"""
     )
 
+
     with gr.Accordion("📒 相关信息", open=False):
-        _ = f"""
-            ## {model_name}
-            Try to refresh the browser and try again when occasionally an error occurs.
-            With a GPU, a query takes from a few seconds to a few tens of seconds, dependent on the number of words/characters
-            the question and responses contain. The quality of the responses varies quite a bit it seems. Even the same
-            question with the same parameters, asked at different times, can result in quite different responses.
+        _ = f""" ChatGLM2 可选参数的信息:
         * Low temperature: responses will be more deterministic and focused; High temperature: responses more creative.
         * Suggested temperatures -- translation: up to 0.3; chatting: > 0.4
         * Top P controls dynamic vocabulary selection based on context.
-        For a table of example values for different scenarios, refer to [this](https://community.openai.com/t/cheat-sheet-mastering-temperature-and-top-p-in-chatgpt-api-a-few-tips-and-tricks-on-controlling-the-creativity-deterministic-output-of-prompt-responses/172683)
-        If the instance is not on a GPU (T4), it will be very slow. You can try to run the colab notebook [chatglm2-6b-4bit colab notebook](https://colab.research.google.com/drive/1WkF7kOjVCcBBatDHjaGkuJHnPdMWNtbW?usp=sharing) for a spin.
-        The T4 GPU is sponsored by a community GPU grant from Huggingface. Thanks a lot!
         """
         gr.Markdown(dedent(_))
     chatbot = gr.Chatbot()
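The trimmed accordion text ("ChatGLM2 可选参数的信息", i.e. notes on ChatGLM2's optional parameters) now only explains what temperature and Top P do. As a rough illustration of where those values end up, the sketch below forwards them to ChatGLM2's chat helper as exposed through transformers' remote code; the checkpoint name and the concrete values are assumptions, not taken from this Space's app.py.

# Illustrative only: how the temperature / top_p settings described in the
# accordion are typically passed to ChatGLM2. The checkpoint and the values
# below are assumptions, not copied from this repository.
from transformers import AutoModel, AutoTokenizer

MODEL_ID = "THUDM/chatglm2-6b-int4"  # assumed int4 checkpoint
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
model = AutoModel.from_pretrained(MODEL_ID, trust_remote_code=True).eval()

history = []
response, history = model.chat(
    tokenizer,
    "Briefly introduce ChatGLM2-6B.",
    history=history,
    max_length=8192,
    top_p=0.85,       # nucleus sampling cutoff ("dynamic vocabulary selection")
    temperature=0.3,  # low temperature: more deterministic, translation-style output
)
print(response)

Raising temperature above 0.4, as the accordion suggests for chatting, trades determinism for more varied replies; the other arguments stay the same.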
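The hunk header places this change just below async def text_to_speech_edge(text, language_code), and the new tagline pairs ChatGLM2 with voice cloning ("ChatGLM2 + 声音克隆", roughly "ChatGLM2 + voice cloning: talk freely with your favorite characters") via FreeVC voice conversion. A helper with that name is usually built on the edge-tts package; the sketch below is a plausible shape for it, with the voice table and temp-file handling as assumptions rather than this repo's actual code.

# Hedged sketch of an edge-tts based helper matching the signature in the
# hunk header; the voice mapping and file handling are assumptions.
import tempfile

import edge_tts

# Hypothetical mapping from a language code to an Edge neural voice.
EDGE_VOICES = {
    "zh": "zh-CN-XiaoxiaoNeural",
    "en": "en-US-JennyNeural",
}

async def text_to_speech_edge(text, language_code):
    voice = EDGE_VOICES.get(language_code, "en-US-JennyNeural")
    communicate = edge_tts.Communicate(text, voice)
    # Synthesize to a temporary mp3 and return its path, which Gradio
    # components such as gr.Audio accept directly.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
        out_path = tmp.name
    await communicate.save(out_path)
    return out_path

Gradio accepts async functions as event handlers, so a coroutine like this can be wired straight to a button with an Audio output component.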