import gradio as gr

# UI metadata for the hosted model demo.
title = "Qilin-Lit-6B"
description = "Qilin-Lit-6B is a finetuned version of GPT-J-6B. It has been trained on webnovels. It can work as a general purpose fantasy novel storyteller."
examples = [
    ['I had eyes but couldn\'t see Mount Tai!'],
]

# Load the model hosted on the Hugging Face Hub as a ready-made Gradio app.
# `title` was previously defined but never passed to the call (only the
# stale commented-out variant used it); include it so the UI shows the name.
demo = gr.Interface.load(
    "models/rexwang8/qilin-lit-6b",
    title=title,
    description=description,
    examples=examples,
)
demo.launch()
# NOTE(review): the triple-quoted string below is disabled code kept for
# reference — an alternative implementation that runs the model locally via
# transformers instead of loading the hosted Hub demo above. It is never
# executed; the string literal is a no-op at module level.
'''
import os
from transformers import AutoTokenizer, AutoModelForCausalLM
def GenerateResp(prompt):
model = AutoModelForCausalLM.from_pretrained('rexwang8/qilin-lit-6b')
tokenizer = AutoTokenizer.from_pretrained('rexwang8/qilin-lit-6b')
input_ids = tokenizer.encode(prompt, return_tensors='pt')
output = model.generate(input_ids, do_sample=True, temperature=1.0, top_p=0.9, repetition_penalty=1.2, max_length=len(input_ids[0])+100, pad_token_id=tokenizer.eos_token_id)
generated_text = tokenizer.decode(output[0])
return generated_text
'''
# NOTE(review): disabled code kept for reference — a manual Interface built
# around the local GenerateResp function above. Never executed (no-op string
# literal). The stray " |" table artifact that followed the closing quotes
# (a syntax error in real Python) has been removed.
'''
inputbox = gr.Textbox(label="Input",lines=3,placeholder='Type anything. The longer the better since it gives Qilin more context. Qilin is trained on english translated eastern (mostly chinese) webnovels.')
outputbox = gr.Textbox(label="Qilin-Lit-6B",lines=8)
iface = gr.Interface(fn=GenerateResp, inputs="text", outputs="text")
iface.launch()
'''