Update app.py
Browse files
app.py
CHANGED
@@ -1,4 +1,17 @@
|
|
|
|
1 |
import gradio as gr
import os
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer ONCE at import time.  The original code
# called from_pretrained() inside GenerateResp, which re-downloaded and
# re-deserialized a 6B-parameter checkpoint on every request.
model = AutoModelForCausalLM.from_pretrained('rexwang8/qilin-lit-6b')
tokenizer = AutoTokenizer.from_pretrained('rexwang8/qilin-lit-6b')


def GenerateResp(prompt):
    """Continue *prompt* with up to 100 sampled tokens from Qilin-Lit-6B.

    Args:
        prompt: Free-form context text; longer prompts give the model
            more context to continue from.

    Returns:
        The decoded generation (prompt plus continuation) as a string.
    """
    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    output = model.generate(
        input_ids,
        do_sample=True,
        temperature=1.0,
        top_p=0.9,
        repetition_penalty=1.2,
        # Allow exactly 100 new tokens beyond the prompt length.
        max_length=len(input_ids[0]) + 100,
        # GPT-style models have no pad token; reusing EOS silences the
        # "pad_token_id not set" warning during generation.
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(output[0])


inputbox = gr.Textbox(label="Input", lines=3, placeholder='Type anything. The longer the better since it gives Qilin more context. Qilin is trained on english translated eastern (mostly chinese) webnovels.')
outputbox = gr.Textbox(label="Qilin-Lit-6B", lines=8)
# Use the configured components: the original built inputbox/outputbox
# but then passed the plain "text" shortcuts, so the labels, line counts
# and placeholder were silently ignored.
iface = gr.Interface(fn=GenerateResp, inputs=inputbox, outputs=outputbox)
iface.launch()
|
|
|
|
import gradio as gr

# UI copy for the hosted demo.
# NOTE(review): the title/description describe the base GPT-J-6B model,
# but the Space actually serves rexwang8/qilin-lit-6b (a GPT-J
# fine-tune) — confirm this wording is intentional.
title = "GPT-J-6B"
description = "GPT-J 6B, a transformer model trained using Ben Wang's Mesh Transformer JAX.'6B' is the number of trainable parameters. Add your text, or click one of the examples to load them."
article = "<p style='text-align: center'><a href='https://github.com/kingoflolz/mesh-transformer-jax' target='_blank'>GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model</a></p>"

# Clickable example prompts rendered below the interface; each inner
# list is one row of inputs.
examples = [
    ['A space ranger encounters a strange silhouette.'],
    ["A day on Saturn is 10 hours and 14 minutes."],
    ["There's no oxygen on Saturn, but roughly 75% hydrogen and 25% helium."],
]

# Proxy the hosted Hugging Face inference API for the model instead of
# loading the 6B-parameter checkpoint locally; queueing smooths over the
# slow generation latency.  (The dead triple-quoted copies of the old
# local-inference implementation that followed were removed — they were
# no-op module-level string literals.)
gr.Interface.load(
    "huggingface/rexwang8/qilin-lit-6b",
    inputs=gr.inputs.Textbox(lines=5, label="Input Text"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    enable_queue=True,
).launch()