saner defaults
app.py CHANGED
@@ -134,7 +134,7 @@ def chat(
     if prompt == "":
         prompt = " "
 
-    print(f"
+    print(f"CHAT ({datetime.now()}):\n-------\n{prompt}")
     print(f"OUTPUT ({datetime.now()}):\n-------\n")
     # Load prompt
     model.loadContext(newctx=prompt)
@@ -184,21 +184,24 @@ Q. Give me a list of vegetables.
 Q. Give me a list of car manufacturers.''', "generative", 80, 0.2, 1.0, "\\n\\n,<|endoftext|>"],
     [
         # Natural Language Interface
-        '''You are the writing assistant for Stephen King. You have worked in the fiction/horror genre for 30 years. You are a Pulitzer Prize-winning author, and now you are tasked with developing a skeletal outline for his newest novel, set to be completed in the spring of 2024. Create a
+        '''You are the writing assistant for Stephen King. You have worked in the fiction/horror genre for 30 years. You are a Pulitzer Prize-winning author, and now you are tasked with developing a skeletal outline for his newest horror novel, set to be completed in the spring of 2024. Create a summary of this work.
+
+Summary:''',"generative", 200, 0.85, 0.8, "<|endoftext|>"]
 ]
 
 
 iface = gr.Interface(
     fn=infer,
-    description='''<p><a href='https://github.com/BlinkDL/RWKV-LM'>RWKV Language Model</a> - RNN With Transformer-level LLM Performance
-
+    description='''<p><a href='https://github.com/BlinkDL/RWKV-LM'>RWKV Language Model</a> - RNN With Transformer-level LLM Performance.
+    According to the author: "It combines the best of RNN and transformers - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding"
+    <p>Thanks to <a href='https://www.rftcapital.com'>RFT Capital</a> for donating compute capability for our experiments. Additional thanks to the author of the <a href="https://github.com/harrisonvanderbyl/rwkvstic">rwkvstic</a> library.</p>''',
     allow_flagging="never",
     inputs=[
         gr.Textbox(lines=20, label="Prompt"), # prompt
         gr.Radio(["generative","Q/A"], value="generative", label="Choose Mode"),
-        gr.Slider(1,
-        gr.Slider(0.0, 1.0, value=0.
-        gr.Slider(0.0, 1.0, value=0.
+        gr.Slider(1, 256, value=40), # max_tokens
+        gr.Slider(0.0, 1.0, value=0.8), # temperature
+        gr.Slider(0.0, 1.0, value=0.85), # top_p
         gr.Textbox(lines=1, value="<|endoftext|>") # stop
     ],
     outputs=gr.Textbox(lines=25),
@@ -208,15 +211,16 @@ iface = gr.Interface(
 
 chatiface = gr.Interface(
     fn=chat,
-    description='''<p><a href='https://github.com/BlinkDL/RWKV-LM'>RWKV Language Model</a> - RNN With Transformer-level LLM Performance
-
+    description='''<p><a href='https://github.com/BlinkDL/RWKV-LM'>RWKV Language Model</a> - RNN With Transformer-level LLM Performance.
+    According to the author: "It combines the best of RNN and transformers - great performance, fast inference, saves VRAM, fast training, "infinite" ctx_len, and free sentence embedding"
+    <p>Thanks to <a href='https://www.rftcapital.com'>RFT Capital</a> for donating compute capability for our experiments. Additional thanks to the author of the <a href="https://github.com/harrisonvanderbyl/rwkvstic">rwkvstic</a> library.</p>''',
     allow_flagging="never",
     inputs=[
         gr.Textbox(lines=5, label="Message"), # prompt
         "state",
-        gr.Slider(1,
-        gr.Slider(0.0, 1.0, value=0.
-        gr.Slider(0.0, 1.0, value=0.
+        gr.Slider(1, 256, value=60), # max_tokens
+        gr.Slider(0.0, 1.0, value=0.8), # temperature
+        gr.Slider(0.0, 1.0, value=0.85), # top_p
         gr.Textbox(lines=1, value="<|endoftext|>") # stop
     ],
     outputs=[gr.Chatbot(color_map=("green", "pink")),"state"],
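After this commit, both interfaces default to temperature 0.8 and top_p 0.85, with 40 max tokens for the generative tab and 60 for the chat tab. For readers unfamiliar with those two sliders, below is a minimal, generic nucleus-sampling sketch of what they control at decode time; it is not code from app.py or from the rwkvstic library, and the name sample_token is purely illustrative.

import numpy as np

# Illustrative sketch only: what the temperature / top_p sliders in the diff
# control during sampling. NOT code from app.py or rwkvstic; the defaults
# below simply mirror the commit (temperature 0.8, top_p 0.85).
def sample_token(logits, temperature=0.8, top_p=0.85):
    logits = np.asarray(logits, dtype=np.float64)
    # Temperature rescales the logits: lower values sharpen the distribution.
    probs = np.exp((logits - logits.max()) / temperature)
    probs /= probs.sum()
    # Nucleus (top-p) filtering: keep the smallest set of tokens whose
    # cumulative probability reaches top_p, then renormalize and sample.
    order = np.argsort(probs)[::-1]
    cutoff = int(np.searchsorted(np.cumsum(probs[order]), top_p)) + 1
    keep = order[:cutoff]
    kept = probs[keep] / probs[keep].sum()
    return int(np.random.choice(keep, p=kept))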