gururise committed on
Commit
5430a9c
·
1 Parent(s): fba8532

update desc and handle blank username in chat

Browse files
Files changed (1) hide show
  1. app.py +10 -7
app.py CHANGED
@@ -17,10 +17,14 @@ import gc
17
 
18
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
19
 
 
 
 
 
 
20
  def to_md(text):
21
  return text.replace("\n", "<br />")
22
 
23
-
24
  def get_model():
25
  model = None
26
  model = RWKV(
@@ -124,6 +128,9 @@ def chat(
124
  torch.cuda.empty_cache()
125
  model = get_model()
126
 
 
 
 
127
  intro = f'''The following is a verbose and detailed conversation between an AI assistant called FRITZ, and a human user called USER. FRITZ is intelligent, knowledgeable, wise and polite.
128
 
129
  {username}: What year was the french revolution?
@@ -219,9 +226,7 @@ Best Full Response:
219
 
220
  iface = gr.Interface(
221
  fn=infer,
222
- description='''<p>RNN With Transformer-level LLM Performance. (<a href='https://github.com/BlinkDL/RWKV-LM'>github</a>)
223
- According to the author: "It combines the best of RNN and transformers - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding"
224
- <p>Thanks to <a href='https://www.rftcapital.com'>RFT Capital</a> for donating compute capability for our experiments. Additional thanks to the author of the <a href="https://github.com/harrisonvanderbyl/rwkvstic">rwkvstic</a> library.</p>''',
225
  allow_flagging="never",
226
  inputs=[
227
  gr.Textbox(lines=20, label="Prompt"), # prompt
@@ -238,9 +243,7 @@ iface = gr.Interface(
238
 
239
  chatiface = gr.Interface(
240
  fn=chat,
241
- description='''<p>RNN With Transformer-level LLM Performance. (<a href='https://github.com/BlinkDL/RWKV-LM'>github</a>)
242
- According to the author: "It combines the best of RNN and transformers - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding"
243
- <p>Thanks to <a href='https://www.rftcapital.com'>RFT Capital</a> for donating compute capability for our experiments. Additional thanks to the author of the <a href="https://github.com/harrisonvanderbyl/rwkvstic">rwkvstic</a> library.</p>''',
244
  allow_flagging="never",
245
  inputs=[
246
  gr.Textbox(lines=5, label="Message"), # prompt
 
17
 
18
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
19
 
20
+ desc = '''<p>RNN with Transformer-level LLM Performance (<a href='https://github.com/BlinkDL/RWKV-LM'>github</a>).
21
+ According to the author: "It combines the best of RNN and transformers - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding."'''
22
+
23
+ thanks = '''<p>Thanks to <a href='https://www.rftcapital.com'>RFT Capital</a> for donating compute capability for our experiments. Additional thanks to the author of the <a href="https://github.com/harrisonvanderbyl/rwkvstic">rwkvstic</a> library.</p>'''
24
+
25
  def to_md(text):
26
  return text.replace("\n", "<br />")
27
 
 
28
  def get_model():
29
  model = None
30
  model = RWKV(
 
128
  torch.cuda.empty_cache()
129
  model = get_model()
130
 
131
+ username = username.strip()
132
+ username = username or "USER"
133
+
134
  intro = f'''The following is a verbose and detailed conversation between an AI assistant called FRITZ, and a human user called USER. FRITZ is intelligent, knowledgeable, wise and polite.
135
 
136
  {username}: What year was the french revolution?
 
226
 
227
  iface = gr.Interface(
228
  fn=infer,
229
+ description=f'''<h3>Generative and Question/Answer</h3>{desc}{thanks}''',
 
 
230
  allow_flagging="never",
231
  inputs=[
232
  gr.Textbox(lines=20, label="Prompt"), # prompt
 
243
 
244
  chatiface = gr.Interface(
245
  fn=chat,
246
+ description=f'''<h3>Chatbot</h3><h4>Refresh page or change name to reset memory context</h4>{desc}{thanks}''',
 
 
247
  allow_flagging="never",
248
  inputs=[
249
  gr.Textbox(lines=5, label="Message"), # prompt