Jonny001 committed
Commit 366b195
1 Parent(s): a4ae02f

Update app.py

Files changed (1)
  1. app.py +84 -24
app.py CHANGED
@@ -3,32 +3,92 @@ import random
 
 model = gr.load("models/Purz/face-projection")
 
-def generate_image(text, seed):
-    if seed is not None:
-        random.seed(seed)
-
-    if text in [example[0] for example in examples]:
-        print(f"Using example: {text}")
-
+def generate_image(text):
     return model(text)
 
 examples = [
-    ["Humanoid Cat Warrior, Full View", None],
-    ["Warhammer Sisterhood", None],
-    ["Future Robots war", None],
-    ["Fantasy dragon", None]
+    ["Humanoid Cat Warrior, Full View"],
+    ["Warhammer Sisterhood"],
+    ["Future Robots war"],
+    ["Fantasy dragon"]
 ]
 
-interface = gr.Interface(
-    fn=generate_image,
-    inputs=[
-        gr.Textbox(label="Type here your imagination:", placeholder="Type or click an example..."),
-        gr.Slider(minimum=0, maximum=10000, step=1, label="Seed (optional)")
-    ],
-    outputs=gr.Image(label="Generated Image"),
-    examples=examples,
-    theme="NoCrypt/miku",
-    description="Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
-)
-
-interface.launch()
+css = """
+#col-container {
+    margin: 0 auto;
+    max-width: 640px;
+}
+"""
+
+with gr.Blocks(css=css) as interface:
+    with gr.Column(elem_id="col-container"):
+        gr.Markdown("# Text-to-Image Gradio Template")
+
+        with gr.Row():
+            prompt = gr.Textbox(
+                label="Type here your imagination:",
+                show_label=False,
+                max_lines=1,
+                placeholder="Type or click an example...",
+                container=False,
+            )
+
+            run_button = gr.Button("Generate Image", scale=0, variant="primary")
+
+        result = gr.Image(label="Generated Image", show_label=False)
+
+        with gr.Accordion("Advanced Settings", open=False):
+            negative_prompt = gr.Text(
+                label="Negative prompt",
+                max_lines=1,
+                placeholder="Enter a negative prompt",
+                visible=False,
+            )
+
+            with gr.Row():
+                width = gr.Slider(
+                    label="Width",
+                    minimum=256,
+                    maximum=1024,
+                    step=32,
+                    value=1024
+                )
+                height = gr.Slider(
+                    label="Height",
+                    minimum=256,
+                    maximum=1024,
+                    step=32,
+                    value=1024
+                )
+
+            with gr.Row():
+                guidance_scale = gr.Slider(
+                    label="Guidance scale",
+                    minimum=0.0,
+                    maximum=10.0,
+                    step=0.1,
+                    value=0.0
+                )
+
+                num_inference_steps = gr.Slider(
+                    label="Number of inference steps",
+                    minimum=1,
+                    maximum=50,
+                    step=1,
+                    value=2
+                )
+
+        gr.Examples(examples=examples, inputs=[prompt])
+
+    gr.on(
+        triggers=[run_button.click, prompt.submit],
+        fn=generate_image,
+        inputs=[prompt],
+        outputs=[result],
+    )
+
+if __name__ == "__main__":
+    interface.launch(
+        theme="NoCrypt/miku",
+        description="Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding."
+    )
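
Review note: launch() does not take theme or description keyword arguments (those belong to the gr.Interface / gr.Blocks constructors), so the new script is expected to raise a TypeError at startup with current Gradio releases. Below is a minimal sketch of one way to keep both settings under the current Gradio API; the Blocks layout from the commit is trimmed here and this is not part of the commit itself.

import gradio as gr

model = gr.load("models/Purz/face-projection")

def generate_image(text):
    return model(text)

# theme is a gr.Blocks() constructor argument, not a launch() option.
with gr.Blocks(theme="NoCrypt/miku") as interface:
    # The former description string, rendered as page text instead of a launch() argument.
    gr.Markdown(
        "Sorry for the inconvenience. The model is currently running on the CPU, "
        "which might affect performance. We appreciate your understanding."
    )
    prompt = gr.Textbox(label="Type here your imagination:", placeholder="Type or click an example...")
    run_button = gr.Button("Generate Image", variant="primary")
    result = gr.Image(label="Generated Image")

    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=generate_image,
        inputs=[prompt],
        outputs=[result],
    )

if __name__ == "__main__":
    # launch() keeps only runtime options such as share or server_port.
    interface.launch()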
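
A second note: generate_image(text) forwards only the prompt, and gr.on() lists only prompt as an input, so the Advanced Settings controls (negative prompt, width, height, guidance scale, inference steps) are rendered but never reach the model. The callable returned by gr.load("models/Purz/face-projection") accepts just the prompt, so wiring them up would need a different backend call. As one illustrative option only (an assumption, not what this commit does), the model could be queried through huggingface_hub's InferenceClient, whose text_to_image method accepts these generation parameters:

from huggingface_hub import InferenceClient

# Assumes Purz/face-projection is reachable through the HF Inference API;
# whether the hosted backend honors every parameter is not guaranteed.
client = InferenceClient(model="Purz/face-projection")

def generate_image(prompt, negative_prompt, width, height, guidance_scale, num_inference_steps):
    # text_to_image() returns a PIL image, which gr.Image can display directly.
    return client.text_to_image(
        prompt,
        negative_prompt=negative_prompt or None,
        width=int(width),
        height=int(height),
        guidance_scale=guidance_scale,
        num_inference_steps=int(num_inference_steps),
    )

The gr.on() call would then need to pass all of the components, e.g. inputs=[prompt, negative_prompt, width, height, guidance_scale, num_inference_steps].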