import random

import torch
import gradio as gr
from diffusers import (
    StableDiffusionPipeline,
    DDIMScheduler,
    EulerDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
    UniPCMultistepScheduler,
    KDPM2DiscreteScheduler,
    KDPM2AncestralDiscreteScheduler,
    PNDMScheduler,
)
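# Minimal Gradio front end for Stable Diffusion text-to-image generation:
# choose a model and a sampler, tune the generation parameters, and view the results in a gallery.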

def set_pipeline(model_id_repo, scheduler):
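    """Load the selected Stable Diffusion checkpoint and attach the requested scheduler."""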

    # Short model names shown in the UI mapped to their Hugging Face repo ids.
    model_ids_dict = {
        "runwayml": "runwayml/stable-diffusion-v1-5",
        "Realistic_Vision_V5_1_noVAE": "SG161222/Realistic_Vision_V5.1_noVAE",
    }
    model_repo = model_ids_dict.get(model_id_repo)
    print("model_repo:", model_repo)


    pipe = StableDiffusionPipeline.from_pretrained(
        model_repo,
        # torch_dtype=torch.float16,  # uncomment for half precision on GPU; keep full precision on CPU
        use_safetensors=True,
    ).to("cpu")


    # Sampler names shown in the UI mapped to their diffusers scheduler classes.
    scheduler_classes = {
        "DDIM": DDIMScheduler,
        "Euler": EulerDiscreteScheduler,
        "Euler a": EulerAncestralDiscreteScheduler,
        "UniPC": UniPCMultistepScheduler,
        "DPM2 Karras": KDPM2DiscreteScheduler,
        "DPM2 a Karras": KDPM2AncestralDiscreteScheduler,
        "PNDM": PNDMScheduler,
    }

    scheduler_class = scheduler_classes.get(scheduler)
    if scheduler_class is not None:
        print("sampler_name:", scheduler)
        pipe.scheduler = scheduler_class.from_config(pipe.scheduler.config)
    else:
        # Unknown sampler name: keep the pipeline's default scheduler.
        print(f"Unknown scheduler '{scheduler}'; keeping the pipeline's default scheduler.")

    return pipe


def img_args(
    prompt,
    negative_prompt,
    model_id_repo="Realistic_Vision_V5_1_noVAE",
    scheduler="Euler a",
    height=896,
    width=896,
    num_inference_steps=30,
    guidance_scale=7.5,
    num_images_per_prompt=1,
    seed=0,
):
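    """Run text-to-image generation and return the resulting PIL images as a list."""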

    pipe = set_pipeline(model_id_repo, scheduler)

    # A seed of 0 means "pick a random seed"; any other value is used as-is for reproducibility.
    if seed == 0:
        seed = random.randint(0, 25647981548564)
        print(f"random seed: {seed}")
    else:
        print(f"manual seed: {seed}")
    generator = torch.manual_seed(seed)

    print("Prompt:",prompt)
    image = pipe(prompt=prompt,
                 negative_prompt = negative_prompt,
                 height = height,
                 width = width,
                 num_inference_steps = num_inference_steps,
                 guidance_scale = guidance_scale,
                 num_images_per_prompt = num_images_per_prompt, # default 1
                 generator = generator,                
                 ).images
    return image


# Build the Gradio UI: prompt inputs and generation options on the left, an output gallery on the right.
block = gr.Blocks().queue()
block.title = "Image Generation"
with block as image_gen:
    with gr.Column():
        with gr.Row():
            gr.Markdown("## Image Generation")
        with gr.Row():
            with gr.Column():
                # with gr.Row():
                prompt = gr.Textbox(placeholder="what you want to generate", label="Positive Prompt")
                negative_prompt = gr.Textbox(placeholder="what you don't want to generate", label="Negative Prompt")
                run_btn = gr.Button("Generate Image", elem_id="select_btn", variant="primary")
                with gr.Accordion(label="Advanced Options", open=False):
                    model_selection = gr.Dropdown(choices=["runwayml", "Realistic_Vision_V5_1_noVAE"], value="Realistic_Vision_V5_1_noVAE", label="Models")
                    scheduler_selection = gr.Dropdown(choices=["DDIM", "Euler", "Euler a", "UniPC", "DPM2 Karras", "DPM2 a Karras", "PNDM"], value="Euler a", label="Scheduler")
                    guidance_scale_slider = gr.Slider(label="guidance_scale", minimum=0, maximum=15, value=7.5, step=0.5)
                    num_images_per_prompt_slider = gr.Slider(label="num_images_per_prompt", minimum=1, maximum=5, value=1, step=1)
                    height_slider = gr.Slider(label="height", minimum=256, maximum=2048, value=896, step=8)
                    width_slider = gr.Slider(label="width", minimum=256, maximum=2048, value=896, step=8)
                    num_inference_steps_slider = gr.Slider(label="num_inference_steps", minimum=1, maximum=150, value=30, step=1)
                    seed_slider = gr.Slider(label="Seed (0 = random)", minimum=0, maximum=256479815, value=0, step=1)
            with gr.Column():
                # out_img = gr.Image(type="pil",label="Output",height=480)
                out_img = gr.Gallery(label='Output', show_label=False, elem_id="gallery", preview=True)
            
                    
    run_btn.click(
        fn=img_args,
        inputs=[prompt, negative_prompt, model_selection, scheduler_selection, height_slider, width_slider, num_inference_steps_slider, guidance_scale_slider, num_images_per_prompt_slider, seed_slider],
        outputs=[out_img],
    )
image_gen.launch()