import os
import sys
from pathlib import Path

import gradio as gr

from all_models import models

current_model = models[0]
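
# A minimal sketch of what `all_models.py` is assumed to export: a plain list of
# Hugging Face model repo IDs. The entries below are hypothetical placeholders,
# not the actual list used by this Space.
#
#   # all_models.py
#   models = [
#       "someuser/model-one",   # placeholder
#       "someuser/model-two",   # placeholder
#   ]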


# Prompt-extension helper loaded from another Space (alternatives kept for reference):
#text_gen1 = gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")
#text_gen1 = gr.Interface.load("spaces/daspartho/prompt-extend")
#text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
text_gen1 = gr.Interface.load("spaces/Yntec/prompt-extend")

# Build one inference wrapper per model so that the dropdown's index (type="index")
# maps directly onto an entry in this list (indices are zero-based).
models2 = [
    gr.Interface.load(f"models/{model}", live=False, preprocess=True, postprocess=False)
    for model in models
]

def text_it1(inputs, text_gen1=text_gen1):
    # Expand the user's short idea into a fuller prompt via the prompt-extend Space.
    return text_gen1(inputs)

def set_model(current_model):
    # `current_model` arrives as the dropdown index (type="index"); look up the
    # model name and use it to relabel the output image.
    current_model = models[current_model]
    return gr.update(label=f"{current_model}")


def send_it1(inputs, model_choice):  # negative_prompt support is stubbed out
    # `model_choice` is the dropdown index; route the prompt to that model.
    proc1 = models2[model_choice]
    output1 = proc1(inputs)
    return output1
css = ""


with gr.Blocks(css=css) as myface:
    gr.HTML("""
     <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
              <div>
                <style>
                    h1 {
                    font-size: 6em;
                    color: #c9c9c9;
                    margin-top: 30px;
                    margin-bottom: 30px;
                    text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
                   }
                   h3 {
                    color: #c9c9c9 !important;
                   }
                   h4 {
                    color: #ffffff !important;
                   }
                   .gradio-container {
                   background-image: linear-gradient(#252525, #1e1e1e, #181818) !important;
                   color: #aaaaaa !important;
                   font-family: 'IBM Plex Sans', sans-serif !important;
                   }
                   .text-gray-500 {
                   color: #c9c9c9 !important;
                   }
                   .gr-box {
                   background-image: linear-gradient(#181818, #1e1e1e, #252525) !important;
                   border-top-color: #000000 !important;
                   border-right-color: #ffffff !important;
                   border-bottom-color: #ffffff !important;
                   border-left-color: #000000 !important;
                   }
                   .gr-input {
                   color: #c9c9c9 !important;
                   background-color: #252525 !important;
                   }
                   :root {
                   --neutral-100: #000000 !important;
                   }
                </style>
                <div class="center"><h1>Printing Press</h1>
                </div>
              </div>
              <p style="margin-bottom: 9px; color: #aaaaaa;">
              <h3>Top 684 Blitz Diffusion Models - A permanently online (unless huggingface is acting up, ha!) backup copy of <a href="https://huggingface.co/spaces/Yntec/ToyWorld"><u><p style="color:#8150df;"><b>Toy World!</b></p></u></a></h3></p>
            </div>
            """)
    with gr.Row():
        with gr.Column(scale=100):
            # Model selection dropdown
            model_name1 = gr.Dropdown(label="Select Model", choices=list(models), type="index", value=current_model, interactive=True)
    with gr.Row():
        with gr.Column(scale=100):
            magic1=gr.Textbox(label="Your Prompt", lines=4) #Positive
        #with gr.Column(scale=100):
            #negative_prompt=gr.Textbox(label="Negative Prompt", lines=1)
            gr.HTML("""<style>           .gr-button {
            color: #ffffff !important;
            text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
            background-image: linear-gradient(#635a76, #a489d2) !important;
            border-radius: 24px !important;
            border: solid 1px !important;
            border-top-color: #c99fff !important;
            border-right-color: #000000 !important;
            border-bottom-color: #000000 !important;
            border-left-color: #c99fff !important;
            padding: 6px 30px;
}
.gr-button:active {
            color: #c99fff !important;
            font-size: 98% !important;
            text-shadow: 0px 0px 0 rgba(0, 0, 0, 1) !important;
            background-image: linear-gradient(#a489d2, #635a76) !important;
            border-top-color: #000000 !important;
            border-right-color: #ffffff !important;
            border-bottom-color: #ffffff !important;
            border-left-color: #000000 !important;
}
.gr-button:hover {
  filter: brightness(130%);
}
</style>""")
            run=gr.Button("Generate Image")
    with gr.Row():
        with gr.Column():
            output1 = gr.Image(label=f"{current_model}")
                
            
    with gr.Row():
        with gr.Column(scale=50):
            input_text = gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2)
            see_prompts = gr.Button("Extend Idea -> overwrite the contents of the 'Your Prompt' box above")
            use_short = gr.Button("Copy the contents of this box to the 'Your Prompt' box above")
    def short_prompt(inputs):
        # Pass the text through unchanged so it can be copied into the prompt box.
        return inputs
    
    model_name1.change(set_model,inputs=model_name1,outputs=[output1])
    
    run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
    
    use_short.click(short_prompt,inputs=[input_text],outputs=magic1)
    
    see_prompts.click(text_it1,inputs=[input_text],outputs=magic1)
    
myface.queue(concurrency_count=200)
myface.launch(inline=True, show_api=False, max_threads=400)
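
# Note: this script targets the Gradio 3.x API (gr.Interface.load and
# queue(concurrency_count=...) are not available in Gradio 4). A hypothetical
# requirements pin such as "gradio==3.50.2" is assumed here; the Space's actual
# pinned version is not shown in this file.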