import streamlit as st
import cv2 as cv
import time
import torch
from diffusers import StableDiffusionPipeline
# "stabilityai/stable-diffusion-2-1-base"
# "CompVis/stable-diffusion-v1-4"
def create_model(loc="stabilityai/stable-diffusion-2-1-base", mch='cpu'):
    # Load the selected Stable Diffusion checkpoint and move it to the target device
    pipe = StableDiffusionPipeline.from_pretrained(loc)
    pipe = pipe.to(mch)
    return pipe
st.title("Text2Image")
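# Model selection: pick a checkpoint and build the pipeline once, storing it in session_state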
the_type = st.selectbox("Model Name", ("stabilityai/stable-diffusion-2-1-base",
                                       "CompVis/stable-diffusion-v1-4"))
create = st.button("Create The Model")
if create:
    st.session_state.t2m_mod = create_model(loc=the_type)
prom = st.text_input("# Prompt",'')
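# Generation settings laid out in columns: seed, steps, image count, and output size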
c1,c2,c3 = st.columns([1,1,3])
c4,c5 = st.columns(2)
with c1:
    bu_1 = st.text_input("Seed", '999')
with c2:
    bu_2 = st.text_input("Steps", '12')
with c3:
    bu_3 = st.text_input("Number of Images", '1')
with c4:
    sl_1 = st.slider("Width", 256, 1024, 512, 128)   # min, max, default, step
with c5:
    sl_2 = st.slider("Height", 256, 1024, 512, 128)  # min, max, default, step
create = st.button("Imagine")
if create:
generator = torch.Generator("cpu").manual_seed(int(bu_1))
model = st.session_state.count.t2m_mod
img = model(prom, width=int(sl_1), height=int(sl_2), num_inference_steps=int(bu_2), generator=generator).images[0]
st.image(img) |