######################
# Imported data
######################
# Data imported from: https://huggingface.co/runwayml/stable-diffusion-v1-5
######################
# Import libraries
######################
import gradio as gr
# ######################
# Global variables
# ######################
import os
USUARIO = os.getenv("USUARIO")  # username (and password) for the Gradio login, read from the environment
#model_id = "helenai/runwayml-stable-diffusion-v1-5-ov-fp32"
model_id = "runwayml/stable-diffusion-v1-5"  # original model
# ######################
# Helper functions
# ######################
# Disabled leftover: a Streamlit progress callback from an earlier version of this app.
# It never runs (guarded by `if False:`) and would also require `streamlit` and `torch`.
if False:
    def pipe_callback(step: int, timestep: int, latents: torch.FloatTensor):
        with st.container():
            st.write(f'Running iteration {step}')
            st.progress(step * 2)  # the progress bar must start at 0 and end at 100
            st.write(f'About {timestep / 100:.0f} seconds left')
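# A minimal sketch (not part of the original app) of how the same progress idea could be
# reported through Gradio itself instead of Streamlit, using gr.Progress. The helper name
# and the `total_steps` default are assumptions; it is not wired into the pipelines below.
def make_progress_callback(progress: gr.Progress, total_steps: int = 50):
    def _callback(step: int, timestep: int, latents):
        # diffusers passes (step, timestep, latents) to `callback` on each reported step
        progress(step / total_steps, desc=f"Step {step}/{total_steps}")
    return _callback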
# ######################
# Model
# ######################
def ia_imagenes(modelo, prompt, prompt_negativo, uploaded_file, my_strength, my_guidance_scale):
    if modelo == "Texto":
        from diffusers import StableDiffusionPipeline
        import torch
        # Use the GPU (and fp16) when available; otherwise fall back to fp32 on the CPU.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id,
            #revision="fp16" if torch.cuda.is_available() else "fp32",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            safety_checker=None,  # skip loading the safety checker entirely
            requires_safety_checker=False
        ).to(device)
        image_pipe = pipe(prompt, negative_prompt=prompt_negativo, width=728, height=728)  # other options: guidance_scale=..., num_inference_steps=..., callback=pipe_callback
        imagen = image_pipe.images[0]
        return imagen
    elif modelo == "Imagen":
        from diffusers import StableDiffusionImg2ImgPipeline
        from PIL import Image
        import torch
        uploaded_file = Image.fromarray(uploaded_file)
        device = "cuda" if torch.cuda.is_available() else "cpu"
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            #revision="fp16" if torch.cuda.is_available() else "fp32",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            safety_checker=None,  # skip loading the safety checker entirely
            requires_safety_checker=False
        ).to(device)
        # Fall back to the diffusers defaults when the numeric inputs are left empty or at 0.
        my_strength = 0.8 if not my_strength else my_strength
        my_guidance_scale = 7.5 if not my_guidance_scale else my_guidance_scale
        imagen = pipe(prompt, image=uploaded_file, negative_prompt=prompt_negativo,
                      strength=my_strength, guidance_scale=my_guidance_scale).images[0]
        return imagen
    else:
        raise gr.Error("Te has olvidado de marcar una opción")
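# Illustrative usage (assumption, kept commented out so nothing heavy runs at import time):
# calling ia_imagenes directly from a Python shell, bypassing the Gradio UI. The prompt and
# output filename are made up.
# imagen_prueba = ia_imagenes("Texto", "un paisaje en acuarela", "borroso", None, 0, 0)
# imagen_prueba.save("salida.png")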
demo = gr.Interface(
    fn=ia_imagenes,
    inputs=[
        gr.Radio(["Texto", "Imagen"], value="Texto"),
        "text",
        "text",
        "image",
        "number",
        "number"
    ],
    outputs="image",
    title="Creación de Imágenes 🖼️",
)
# The login prompt is only enabled when the USUARIO environment variable is set.
demo.launch(show_error=True, auth=(USUARIO, USUARIO) if USUARIO else None, share=False)
'''
TODO:
- Button to download the generated image
- Show how long generation takes
'''
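# Rough sketch (assumption, not implemented) for the pending items above: the gr.Image
# output component already exposes a download button in recent Gradio versions, and the
# elapsed time could be returned as a second output by a hypothetical wrapper like this:
#
# import time
#
# def ia_imagenes_con_tiempo(*args):
#     inicio = time.perf_counter()
#     imagen = ia_imagenes(*args)
#     return imagen, f"{time.perf_counter() - inicio:.1f} s"
#
# (the Interface above would then use fn=ia_imagenes_con_tiempo with
#  outputs=["image", "text"].)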