import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the Meta-Llama-3.1-8B-Instruct-GGUF model. transformers loads GGUF
# checkpoints by dequantizing them and needs the name of a specific .gguf file
# from the repository; the quantization variant below is an assumption, so
# replace it with a file that actually exists in the repo.
model_name = "lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF"
gguf_file = "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf"  # assumed file name

tokenizer = AutoTokenizer.from_pretrained(model_name, gguf_file=gguf_file)
base_model = AutoModelForCausalLM.from_pretrained(model_name, gguf_file=gguf_file)
model = pipeline("text-generation", model=base_model, tokenizer=tokenizer, device=-1)  # -1 for CPU

# Define the Gradio interface
def generate_text(prompt):
    # max_new_tokens caps the length of the completion; the pipeline's
    # "generated_text" field includes the original prompt as a prefix.
    output = model(prompt, max_new_tokens=256)[0]["generated_text"]
    return output

iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Prompt"),
    outputs=gr.Textbox(label="Generated Text"),
    title="Meta-Llama-3.1-8B-Instruct-GGUF Text Generation",
    description="Enter a prompt to generate text using the Meta-Llama-3.1-8B-Instruct-GGUF model.",
)

# Launch the Gradio app
iface.launch()
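
# Usage sketch: once the app is running, it can also be queried from a separate
# script with the gradio_client package. The localhost URL/port and the
# "/predict" api_name are assumptions based on Gradio's defaults for a
# single-function Interface; adjust them to whatever launch() reports.
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict("Write a short poem about the sea.", api_name="/predict")
#   print(result)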