|
import gradio as gr |
|
import torch |
|
from transformers import pipeline |
|
|
|
|
|
# Text-generation pipeline used by the Gradio app below.
# NOTE(fix): the original pointed at "black-forest-labs/FLUX.1-schnell",
# which is a text-to-IMAGE diffusion model — a "text-generation" pipeline
# cannot load it and fails at construction time. Use a causal-LM
# checkpoint instead.
model = pipeline("text-generation", model="gpt2")
|
|
|
|
|
def generate_text(prompt, seed, num_inference_steps):
    """Generate a sampled text continuation of ``prompt`` reproducibly.

    Args:
        prompt: Input text for the model to continue.
        seed: RNG seed for reproducible sampling. Gradio's ``Number``
            component may deliver a float, so it is coerced to ``int``
            before seeding (``torch.manual_seed`` rejects floats).
        num_inference_steps: Accepted for interface compatibility with the
            Gradio inputs but unused — "inference steps" is a diffusion
            concept and has no meaning for a text-generation pipeline.

    Returns:
        The generated text (prompt plus continuation) as a string.
    """
    # transformers' generate() does not take a torch.Generator keyword,
    # so passing generator=... either errors or is silently dropped.
    # Seed the global torch RNG instead for reproducible sampling.
    torch.manual_seed(int(seed))
    output = model(
        prompt,
        num_return_sequences=1,
        max_length=256,
        do_sample=True,
    )
    return output[0]["generated_text"]
|
|
|
|
|
# Gradio UI: prompt + seed + steps in, generated text out.
# NOTE(fix): gr.Number takes `value=` for its initial value in Gradio 3+;
# the old `default=` kwarg was removed and raises a TypeError at startup.
# `precision=0` makes the components deliver integers rather than floats.
interface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Input Prompt"),
        gr.Number(label="Seed", value=0, precision=0),
        gr.Number(label="Inference Steps", value=50, precision=0),
    ],
    outputs=gr.Textbox(label="Generated Text"),
)
|
|
|
|
|
# Only start the web server when executed as a script, so the module can
# be imported (e.g. for testing or reuse) without side effects.
if __name__ == "__main__":
    interface.launch()
|
|