Silence1412 committed on
Commit
e339223
·
1 Parent(s): d8366c4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -3
app.py CHANGED
@@ -3,12 +3,38 @@ import numpy as np
3
  import torch
4
  from PIL import Image
5
  from diffusers import StableDiffusionPipeline
 
 
 
6
 
7
  model_id = "runwayml/stable-diffusion-v1-5"
8
 
9
  pipe = StableDiffusionPipeline.from_pretrained(model_id).to('cpu')
10
 
11
- def infer(prompt, negative, steps, scale, seed):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  generator = torch.Generator(device='cpu').manual_seed(seed)
13
  img = pipe(
14
  prompt,
@@ -25,6 +51,14 @@ block = gr.Blocks()
25
 
26
  with block:
27
  with gr.Group():
 
 
 
 
 
 
 
 
28
  with gr.Box():
29
  with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
30
  with gr.Column():
@@ -63,6 +97,8 @@ with block:
63
  scale = gr.Slider(label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1, interactive=True)
64
  seed = gr.Slider(label="Random seed",minimum=0,maximum=2147483647,step=1,randomize=True,interactive=True)
65
 
66
- btn.click(infer, inputs=[text, negative, steps, scale, seed], outputs=[gallery])
 
 
67
 
68
- block.launch(show_api=False)
 
3
  import torch
4
  from PIL import Image
5
  from diffusers import StableDiffusionPipeline
6
+ from transformers import pipeline, set_seed
7
+ import random
8
+ import re
9
 
10
  model_id = "runwayml/stable-diffusion-v1-5"
11
 
12
  pipe = StableDiffusionPipeline.from_pretrained(model_id).to('cpu')
13
 
14
+ gpt2_pipe = pipeline('text-generation', model='Gustavosta/MagicPrompt-Stable-Diffusion', tokenizer='gpt2')
15
+
16
def infer1(starting_text):
    """Generate Stable Diffusion prompt suggestions from *starting_text*.

    Runs the module-level ``gpt2_pipe`` (MagicPrompt GPT-2 text-generation
    pipeline) with a fresh random seed on every call, then filters and cleans
    the four generated candidates.

    Args:
        starting_text: seed text typed by the user (may be empty).

    Returns:
        The newline-joined surviving prompt suggestions, or ``None`` when
        every candidate was filtered out (Gradio renders ``None`` as an
        empty Textbox).
    """
    # Re-seed transformers' RNG so repeated clicks yield different prompts.
    seed = random.randint(100, 1000000)
    set_seed(seed)

    # NOTE(review): the original guarded a punctuation-stripping re.sub with
    # `if starting_text == ""` — stripping punctuation from an empty string
    # is a no-op, so that branch was dead code and has been removed.

    response = gpt2_pipe(
        starting_text,
        max_length=(len(starting_text) + random.randint(60, 90)),
        num_return_sequences=4,
    )

    response_list = []
    for x in response:
        resp = x['generated_text'].strip()
        # Keep only candidates that actually extend the input and do not end
        # on a dangling separator character.
        if resp != starting_text and len(resp) > (len(starting_text) + 4) and resp.endswith((":", "-", "—")) is False:
            response_list.append(resp + '\n')

    response_end = "\n".join(response_list)
    # Drop tokens containing an internal dot (URLs, file names). Raw string
    # fixes the invalid "\." escape warning in the original pattern; the
    # compiled regex is identical.
    response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)
    # Strip angle brackets so stray model tokens don't break the UI text.
    response_end = response_end.replace("<", "").replace(">", "")

    if response_end != "":
        return response_end
    # Falls through to an implicit None when everything was filtered out —
    # original behavior, preserved for the Gradio Textbox output.
36
+
37
+ def infer2(prompt, negative, steps, scale, seed):
38
  generator = torch.Generator(device='cpu').manual_seed(seed)
39
  img = pipe(
40
  prompt,
 
51
 
52
  with block:
53
  with gr.Group():
54
+ with gr.Box():
55
+ txt = gr.Textbox(lines=1, label="Initial Text", placeholder="English Text here")
56
+ gpt_btn = gr.Button("Generate prompt").style(
57
+ margin=False,
58
+ rounded=(False, True, True, False),
59
+ )
60
+ out = gr.Textbox(lines=4, label="Generated Prompts")
61
+
62
  with gr.Box():
63
  with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
64
  with gr.Column():
 
97
  scale = gr.Slider(label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1, interactive=True)
98
  seed = gr.Slider(label="Random seed",minimum=0,maximum=2147483647,step=1,randomize=True,interactive=True)
99
 
100
+ title = "Stable Diffusion Prompt Generator"
101
+ gpt_btn.click(infer1,inputs=txt,outputs=out)
102
+ btn.click(infer2, inputs=[text, negative, steps, scale, seed], outputs=[gallery])
103
 
104
+ block.launch(show_api=False,enable_queue=True, debug=True)