# NOTE(review): removed non-Python extraction artifacts (file-size line,
# commit hash, and a row of line numbers) that would fail to parse.
import torch  # kept: transformers uses torch tensors ("pt") under the hood
import gradio as gr  # was missing, but `gr` is used below to build the UI
# LLaMA is a decoder-only causal LM — transformers has no
# `LLaMAForConditionalGeneration`/`LLaMATokenizer`; the Auto* classes
# resolve to the correct Llama implementations from the model config.
from transformers import AutoModelForCausalLM, AutoTokenizer

# Published Hugging Face hub id (the old lowercase "llama-3-1-405b" repo
# does not exist). NOTE(review): 405B weights will not fit on a single
# machine without sharding/quantization — confirm the deployment target.
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.1-405B")
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-405B")
def generate_text(prompt):
    """Generate a text continuation of *prompt* with the loaded model.

    Args:
        prompt: Input text to condition generation on.

    Returns:
        The decoded generated text with special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # generate() returns a (batch, seq_len) tensor of token ids;
    # decode() expects a single 1-D sequence, so take the first
    # (and only) row — passing the 2-D tensor directly raises.
    output = model.generate(**inputs)
    return tokenizer.decode(output[0], skip_special_tokens=True)
# Minimal Gradio UI: a single text box in, generated text out.
demo = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="LLaMA Text Generation",
    description="Enter a prompt to generate text using the LLaMA model.",
)

# Guard the server start so importing this module (e.g. for testing)
# does not launch the web app. Also drops the stray trailing "|" that
# made the original launch line a syntax error.
if __name__ == "__main__":
    demo.launch()