File size: 2,231 Bytes
26fd434
4ce6602
e17d60b
26fd434
4980386
 
4ce6602
 
 
4980386
 
4ce6602
4980386
 
 
5297371
848c9a0
 
4980386
 
 
848c9a0
 
4980386
 
 
 
 
 
 
 
 
848c9a0
4980386
 
e17d60b
4980386
 
 
5297371
4980386
 
 
 
 
 
 
 
5297371
4ce6602
 
4980386
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
import gradio as gr
import spaces
from huggingface_hub import HfApi

@spaces.GPU(duration=60)
def generate_image(prompt: str, negative_prompt: str = "", seed: int = -1, width: int = 512, height: int = 512, guidance_scale: float = 7.5, num_inference_steps: int = 50):
    """Placeholder entry point that reserves a ZeroGPU slot (60 s) for generation.

    The decorator is what matters here: it registers a GPU-backed function with
    the Hugging Face Spaces runtime. The actual inference is performed by the
    interface returned from ``gr.load("GenAIJake/d3xt3r")`` below, so this body
    is intentionally empty.

    Args:
        prompt: Text prompt describing the desired image.
        negative_prompt: Concepts to steer away from (empty = none).
        seed: RNG seed; -1 requests a random seed.
        width: Output image width in pixels.
        height: Output image height in pixels.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of diffusion denoising steps.

    Returns:
        None — see note above; the loaded remote interface produces the image.
    """
    # This function will be called by gr.load(), so we don't need to implement it here
    pass

# Load the pre-configured Flux model interface from the Hugging Face Hub.
# NOTE(review): gr.load() performs a network request at import time and
# returns a callable Gradio interface backed by the remote Space/model.
flux_demo = gr.load("GenAIJake/d3xt3r")

# Build the extended UI: prompt inputs on the left, generation parameters on
# the right, outputs below, wired to the loaded remote model.
with gr.Blocks() as demo:
    gr.Markdown("# D3XT3R Dachshund Image Generator (Extended Version)")

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt here...")
            negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Enter negative prompt here...")
            generate_button = gr.Button("Generate Image")

        with gr.Column():
            # BUG FIX: gr.Number has no `description` keyword argument — the
            # helper-text parameter on Gradio components is `info`. The
            # original `description=...` raised TypeError at startup.
            seed = gr.Number(label="Seed", value=-1, precision=0, info="Set to -1 for random seed")
            width = gr.Slider(256, 1024, value=512, step=64, label="Width")
            height = gr.Slider(256, 1024, value=512, step=64, label="Height")
            guidance_scale = gr.Slider(1, 20, value=7.5, step=0.5, label="Guidance Scale")
            num_inference_steps = gr.Slider(10, 100, value=50, step=1, label="Number of Inference Steps")

    with gr.Row():
        image_output = gr.Image(label="Generated Image")
        seed_output = gr.Number(label="Used Seed", precision=0)

    # Route clicks to the loaded Flux model interface. The input order must
    # match the remote interface's expected positional arguments.
    generate_button.click(
        flux_demo.predict,
        inputs=[prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps],
        outputs=[image_output, seed_output]
    )

    # Canned example prompts; clicking one fills the inputs above.
    gr.Examples(
        examples=[
            ["d3xt3r as a camp counselor in the woods.", "", -1, 512, 512, 7.5, 50],
            ["d3xt3r dressed as batman", "blurry, low quality", 42, 640, 640, 8.0, 60],
            ["d3xt3r in a suit and tie", "", 123, 768, 512, 6.5, 40],
        ],
        inputs=[prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps],
    )

# Enable request queuing (required for ZeroGPU scheduling) and start the app.
demo.queue()
demo.launch()