3laa2 committed
Commit 232aeb3
Parent(s): cd574a7

Update app.py

Files changed (1):
  1. app.py +52 -9
app.py CHANGED
@@ -1,14 +1,57 @@
 import streamlit as st
-from PIL import Image
+import cv2 as cv
+import time
+import streamlit as st
 import torch
 from diffusers import StableDiffusionPipeline
-import cv2 as cv
+import os
+import openai
+
+# "stabilityai/stable-diffusion-2-1-base"
+# "CompVis/stable-diffusion-v1-4"
+def create_model(loc = "stabilityai/stable-diffusion-2-1-base", mch = 'cuda'):
+    pipe = StableDiffusionPipeline.from_pretrained(loc, torch_dtype=torch.float16)
+    pipe = pipe.to(mch)
+    return pipe
+
+
+
+openai.api_key = "please-paste-your-API-key-here"
+
+def chatWithGPT(prompt):
+    completion = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo",
+        messages=[
+            {"role": "expert in creating prompts for stable diffusion", "content": prompt}
+        ]
+    )
+    return print(completion.choices[0].message.content)
+
+
+
+t2i = st.checkbox("Text2Image")
+
+if t2i:
+    st.title("Text2Image")
+    t2m_mod = create_model()
+
+    prom = st.text_input("# Prompt",'')

-st.title("Txt2Img")
-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16")
-pipe.to("cpu")
-prom = st.text_area("Prompt",'')
-if len(prom)>5:
-    image = pipe(prom).images[0]
-    st.image(image)
+    c1,c2,c3 = st.columns([1,1,3])
+    c4,c5 = st.columns(2)
+    with c1:
+        bu_1 = st.text_input("Seed",'999')
+    with c2:
+        bu_2 = st.text_input("Steps",'12')
+    with c3:
+        bu_3 = st.text_input("Number of Images",'1')
+    with c4:
+        sl_1 = st.slider("Width",256,1024,128)
+    with c5:
+        sl_2 = st.slider("hight",256,1024,128)

+    create = st.button("Imagine")
+    if create:
+        generator = torch.Generator("cuda").manual_seed(int(bu_1))
+        img = t2m_mod(prom, width=int(sl_1), height=int(sl_2), num_inference_steps=int(bu_2), generator=generator).images[0]
+        st.image(img)
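
Note on create_model(): because Streamlit reruns the whole script on every widget interaction, the new file reloads the Stable Diffusion weights each time. A minimal sketch of how the pipeline could be cached across reruns, assuming a recent Streamlit that provides st.cache_resource (not part of this commit):

import streamlit as st
import torch
from diffusers import StableDiffusionPipeline

@st.cache_resource  # load the weights once and reuse them across Streamlit reruns
def create_model(loc="stabilityai/stable-diffusion-2-1-base", mch="cuda"):
    pipe = StableDiffusionPipeline.from_pretrained(loc, torch_dtype=torch.float16)
    return pipe.to(mch)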
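Note on chatWithGPT(): the helper added here is not yet called from the UI, the chat API only accepts the roles "system", "user" and "assistant", and print() returns None. A minimal corrected sketch, assuming the pre-1.0 openai client used in the commit (the system-message wording is an assumption, not from the source):

import openai

openai.api_key = "please-paste-your-API-key-here"

def chatWithGPT(prompt):
    # The "expert ..." framing goes into a system message; the user message carries the prompt.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": "You are an expert at writing prompts for Stable Diffusion."},
            {"role": "user", "content": prompt},
        ],
    )
    # Return the generated text instead of the None that print() would produce.
    return completion.choices[0].message.content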
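Note on the "Number of Images" field: bu_3 is read but never passed to the pipeline, so only one image is generated. A sketch of how it could be wired in, reusing the variables defined earlier in app.py and assuming diffusers' num_images_per_prompt argument (this wiring is not part of the commit):

    create = st.button("Imagine")
    if create:
        generator = torch.Generator("cuda").manual_seed(int(bu_1))
        out = t2m_mod(
            prom,
            width=int(sl_1),
            height=int(sl_2),
            num_inference_steps=int(bu_2),
            num_images_per_prompt=int(bu_3),  # forward the "Number of Images" field
            generator=generator,
        )
        for img in out.images:  # show every generated image, not just the first
            st.image(img)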