#___________________________________________________________________________________________________________________________

import streamlit as st

#___________________________________________________________________________________________________________________________

import torch
from diffusers import StableDiffusionPipeline

#___________________________________________________________________________________________________________________________

st.title('IMGTEXTA')
prompt = st.text_input('Enter Your Prompt')
generate = st.button('Generate')

#___________________________________________________________________________________________________________________________

model_id = "CompVis/stable-diffusion-v1-4"
device = "cpu"
st.info('Loading the Stable Diffusion pipeline (this can take a while on CPU)...')
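# Note: "cpu" keeps the app runnable anywhere but is slow. If a CUDA GPU is
# available, a common alternative (a sketch, not part of the original script) is:
#   device = "cuda" if torch.cuda.is_available() else "cpu"
# paired with torch_dtype=torch.float16 when loading the pipeline below.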
#___________________________________________________________________________________________________________________________

pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token='Add Your Info', torch_dtype=torch.float32)
pipe = pipe.to(device)

# Disable the built-in NSFW safety checker so it never blanks out generated images
def dummy(images, **kwargs):
    return images, False
pipe.safety_checker = dummy
st.info('Pipeline loaded.')
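# Streamlit re-runs this whole script on every interaction, so the pipeline above
# is reloaded on every button press. A rough sketch of one way to avoid that
# (assumes Streamlit >= 1.18, which provides st.cache_resource):
#
#   @st.cache_resource
#   def load_pipe():
#       p = StableDiffusionPipeline.from_pretrained(
#           model_id, use_auth_token='Add Your Info', torch_dtype=torch.float32)
#       p.safety_checker = dummy
#       return p.to(device)
#
#   pipe = load_pipe()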
#___________________________________________________________________________________________________________________________

def infer(prompt, width, height, steps, scale, seed):
    # A seed of -1 means "random"; any other value seeds the generator so the
    # same prompt reproduces the same image.
    if seed == -1:
        images_list = pipe(
            [prompt],
            height=height,
            width=width,
            num_inference_steps=steps,
            guidance_scale=scale)
    else:
        images_list = pipe(
            [prompt],
            height=height,
            width=width,
            num_inference_steps=steps,
            guidance_scale=scale,
            generator=torch.Generator(device=device).manual_seed(seed))

    return images_list.images  # newer diffusers return .images (the old ["sample"] key was removed)
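# Example call with hypothetical values; a non-negative seed makes the result reproducible:
#   st.image(infer('an astronaut riding a horse', 512, 512, 30, 7.5, 42))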
st.info('Pipeline ready. Enter a prompt and press Generate.')
#___________________________________________________________________________________________________________________________

def onclick(prompt):
    # 512x512 image, 30 denoising steps, guidance scale 7.5, random seed
    st.image(infer(prompt, 512, 512, 30, 7.5, -1))


# Only generate when the button was pressed and a prompt was entered
if generate and prompt:
    onclick(prompt)
    st.info('Generation finished.')

#___________________________________________________________________________________________________________________________
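# To run this app locally (assuming the file is saved as app.py):
#   pip install streamlit torch diffusers transformers
#   streamlit run app.py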