"""Gradio app: D3XT3R Dachshund image generator.

Wraps the pre-configured Flux model Space ``GenAIJake/d3xt3r`` (loaded via
``gr.load``) in an extended Blocks UI that exposes seed, size, guidance and
step controls. Intended to run on Hugging Face Spaces with ZeroGPU.
"""

import gradio as gr
import spaces
from huggingface_hub import HfApi  # NOTE(review): unused here — presumably kept for Hub ops; confirm before removing


@spaces.GPU(duration=60)
def generate_image(prompt, negative_prompt="", seed=-1, width=512, height=512,
                   guidance_scale=7.5, num_inference_steps=50):
    """Placeholder GPU entry point.

    Generation is delegated to the loaded Flux interface (``flux_demo``), so
    this body is intentionally empty; the ``@spaces.GPU`` decorator exists to
    request ZeroGPU hardware allocation on Hugging Face Spaces.
    """
    pass


# Load the pre-configured Flux model interface from the Hub.
flux_demo = gr.load("GenAIJake/d3xt3r")

# Build the extended Gradio interface around the loaded model.
with gr.Blocks() as demo:
    gr.Markdown("# D3XT3R Dachshund Image Generator (Extended Version)")

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter negative prompt here...")
            generate_button = gr.Button("Generate Image")
        with gr.Column():
            # BUG FIX: gr.Number has no `description` keyword (that raised a
            # TypeError at startup) — the helper-text parameter is `info`.
            seed = gr.Number(label="Seed", value=-1, precision=0,
                             info="Set to -1 for random seed")
            width = gr.Slider(256, 1024, value=512, step=64, label="Width")
            height = gr.Slider(256, 1024, value=512, step=64, label="Height")
            guidance_scale = gr.Slider(1, 20, value=7.5, step=0.5, label="Guidance Scale")
            num_inference_steps = gr.Slider(10, 100, value=50, step=1, label="Number of Inference Steps")

    with gr.Row():
        image_output = gr.Image(label="Generated Image")
        seed_output = gr.Number(label="Used Seed", precision=0)

    # Route clicks to the loaded Flux model's predict function.
    # NOTE(review): assumes the remote Space accepts these seven inputs and
    # returns (image, seed) — verify against the GenAIJake/d3xt3r API.
    generate_button.click(
        flux_demo.predict,
        inputs=[prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps],
        outputs=[image_output, seed_output],
    )

    # Clickable example prompts pre-filling all generation parameters.
    gr.Examples(
        examples=[
            ["d3xt3r as a camp counselor in the woods.", "", -1, 512, 512, 7.5, 50],
            ["d3xt3r dressed as batman", "blurry, low quality", 42, 640, 640, 8.0, 60],
            ["d3xt3r in a suit and tie", "", 123, 768, 512, 6.5, 40],
        ],
        inputs=[prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps],
    )

# Enable request queuing (required for ZeroGPU) and launch the app.
demo.queue()
demo.launch()