Spaces: the app (typically saved as app.py on a Hugging Face Space) loads the fine-tuned model and wraps it in a Gradio interface.
import gradio as gr
import transformers as t
import torch

# Load the fine-tuned weights and the base model's tokenizer
model = t.AutoModelForCausalLM.from_pretrained("./weights")
tokenizer = t.AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-hf")
tokenizer.pad_token_id = 0  # Llama 2 has no pad token by default; use token 0
# Define a prediction function
def generate_article(title):
    prompt = f"Below is a title for an article. Write an article that appropriately suits the title: \n\n### Title:\n{title}\n\n### Article:\n"
    pipe = t.pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=1000)
    output = pipe([prompt])
    generated_article = output[0][0]["generated_text"]
    return generated_article
# Create a Gradio interface
iface = gr.Interface(
    fn=generate_article,
    inputs=gr.Textbox(lines=2, placeholder="Enter Article Title Here"),
    outputs="text",
    title="Article Generator",
    description="Enter a title to generate an article.",
)
# Launch the app
iface.launch()
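One design note: generate_article rebuilds the text-generation pipeline on every request, which re-wraps the model for each call. A minimal sketch, assuming the same ./weights checkpoint and base tokenizer as above, that builds the pipeline once at import time instead:

import transformers as t

# Same checkpoint and tokenizer as in the app above
model = t.AutoModelForCausalLM.from_pretrained("./weights")
tokenizer = t.AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-hf")
tokenizer.pad_token_id = 0

# Build the pipeline once, at module scope, instead of inside the handler
pipe = t.pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=1000)

def generate_article(title):
    prompt = f"Below is a title for an article. Write an article that appropriately suits the title: \n\n### Title:\n{title}\n\n### Article:\n"
    return pipe(prompt)[0]["generated_text"]

The gr.Interface definition and iface.launch() call stay exactly as in the listing above.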