|
import gradio as gr |
|
from transformers import AutoTokenizer, AutoModelForCausalLM, TextGenerationPipeline |
|
|
|
# Load the fine-tuned group-chat model and its tokenizer from the Hub.
# trust_remote_code is required because the checkpoint ships custom model code;
# ignore_mismatched_sizes tolerates head-size differences against the base config.
model = AutoModelForCausalLM.from_pretrained(
    'cbauer/groupchatGPT',
    trust_remote_code=True,
    ignore_mismatched_sizes=True,
)

tokenizer = AutoTokenizer.from_pretrained('cbauer/groupchatGPT')

# Single text-generation pipeline used by generate() below.
# NOTE(fix): the original reassigned `generator = pipeline(...)` afterwards,
# but `pipeline` was never imported (NameError) and the call also dropped the
# tokenizer. The TextGenerationPipeline below is the one correct construction.
generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
|
|
|
def generate(text):
    """Continue *text* with the group-chat model.

    Runs the module-level `generator` pipeline and returns the single
    generated sequence (which includes the prompt) as a string.
    """
    sequences = generator(text, max_length=30, num_return_sequences=1)
    first = sequences[0]
    return first["generated_text"]
|
|
|
# Prompt examples shown in the Gradio UI; each inner list is one input row.
# The "### <name>" prefix mirrors the speaker-tag format the model was trained on.
examples = [["###Chase Hello"], ["### Jake Whats up"]]
|
|
|
# Build and serve the web UI.
# Fix: `gr.inputs.Textbox` / `gr.outputs.Textbox` are the deprecated Gradio 2.x
# namespaces and were removed in Gradio 3.x/4.x — components are now used
# directly from the top-level `gr` namespace.
demo = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(lines=5, label="Input Text"),
    outputs=gr.Textbox(label="Generated Text"),
    examples=examples,
)

demo.launch()