Update app.py
app.py
CHANGED
@@ -10,15 +10,15 @@ tinier = AutoModelForCausalLM.from_pretrained("afrizalha/Sasando-1-7M", token=hf
 
 desc = """Sasando-1 is a tiny, highly experimental text generator built using the Phi-3 architecture. It comes with two variations of microscopic sizes: 7M and 25M parameters. It is trained on a tightly-controlled Indo4B dataset filtered to only have 18000 unique words. The method is inspired by Microsoft's TinyStories paper which demonstrates that a tiny language model can produce fluent text when trained on tightly-controlled dataset.\n\nTry prompting with two simple words, and let the model continue. Fun examples provided below."""
 
-def generate(starting_text, choice, temp, top_p):
+def generate(starting_text=None, choice=None, temp=None, top_p=None, info=False):
+    if info:
+        return desc
+
     if choice == '7M':
         model = tinier
     elif choice == '25M':
         model = tiny
-
-    yield desc
-    return
-
+
     results = []
     for i in range(5):
         inputs = tokenizer([starting_text], return_tensors="pt").to(model.device)
@@ -32,23 +32,30 @@ def generate(starting_text, choice, temp, top_p):
         outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
         outputs = outputs[:outputs.find(".")]
         results.append(outputs)
-
-
+    return "\n\n".join(results)
+
 with gr.Blocks(theme=gr.themes.Soft()) as app:
     starting_text = gr.Textbox(label="Starting text", value="cinta adalah")
-    choice = gr.Radio(["7M", "25M"
-
-
-
+    choice = gr.Radio(["7M", "25M"], label="Select model", value="7M")
+    info_button = gr.Button("Info")
+
+    with gr.Row():
+        temp = gr.Slider(label="Temperature", minimum=0.05, maximum=1.0, step=0.05, value=0.7)
+        top_p = gr.Slider(label="Top P", minimum=0.05, maximum=1.0, step=0.05, value=0.5)
+
     res = gr.Textbox(label="Continuation")
+
     gr.Interface(
         fn=generate,
-        inputs=[starting_text,choice,temp,top_p],
+        inputs=[starting_text, choice, temp, top_p, info_button],
         outputs=[res],
         allow_flagging="never",
         title="Sasando-1",
-
-
-
+    )
+
+    examples = gr.Examples([
+        ["gue"], ["presiden"], ["cinta adalah"], ["allah, aku"], ["dia marah karena"],
+        ["inflasi"], ["kolam renang"], ["messi"], ["jalan-jalan"], ["komputer itu"]
+    ], [starting_text])
 
 app.launch()
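
For anyone wanting to reproduce the behavior outside Gradio, below is a minimal standalone sketch of the sampling loop that the updated generate() implements. It is a sketch under assumptions, not the app's exact code: only "afrizalha/Sasando-1-7M" appears in the hunk context above, so the 25M repo id and the tokenizer source are guesses, and the model.generate() call sits in the unchanged region between the two hunks, so its exact arguments (e.g. max_new_tokens) are placeholders here.

# Standalone sketch of the app's generation loop (assumptions noted inline).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "afrizalha/Sasando-1-7M"   # the 25M id ("afrizalha/Sasando-1-25M") is assumed, not shown in the diff
tokenizer = AutoTokenizer.from_pretrained(model_id)   # tokenizer source assumed; app.py's tokenizer setup is not shown
model = AutoModelForCausalLM.from_pretrained(model_id)  # app.py also passes an HF token; omitted here

def continue_text(starting_text, temp=0.7, top_p=0.5, n=5):
    """Sample n short continuations, each cut at the first period, as the app does."""
    inputs = tokenizer([starting_text], return_tensors="pt").to(model.device)
    results = []
    for _ in range(n):
        outputs = model.generate(
            **inputs,
            max_new_tokens=32,     # assumed cap; the real arguments are in lines not shown in this diff
            do_sample=True,
            temperature=temp,
            top_p=top_p,
        )
        text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
        results.append(text[:text.find(".")])
    return "\n\n".join(results)

print(continue_text("cinta adalah"))

As in the app, each sample is truncated at its first period; if no period is produced, str.find returns -1 and the slice drops the final character instead.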