HelloSun committed
Commit 3f9701b · verified · 1 Parent(s): 5473b5a

Update app.py

Files changed (1)
  1. app.py +6 -76
app.py CHANGED
@@ -6,41 +6,26 @@ from optimum.intel.openvino import OVStableDiffusionPipeline
 import torch
 
 
-
-
 model_id = "helenai/Linaqruf-anything-v3.0-ov"
 
 pipe = OVStableDiffusionPipeline.from_pretrained(model_id, compile=False)
 pipe.reshape( batch_size=1, height=256, width=256, num_images_per_prompt=1)
 pipe.compile()
 
+def infer(prompt, negative_prompt):
 
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 256
-
-def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
-
-    #if randomize_seed:
-    #    seed = random.randint(0, MAX_SEED)
-
-    #generator = torch.Generator().manual_seed(seed)
-
     image = pipe(
         prompt = prompt,
         negative_prompt = negative_prompt,
-        #guidance_scale = guidance_scale,
-        #num_inference_steps = num_inference_steps,
-        width = width,
-        height = height,
-        #generator = generator
+        width = 256,
+        height = 256,
     ).images[0]
 
     return image
 
 examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
+    "A cute kitten, Japanese cartoon style.",
+    "A sweet family, dad stands next to mom, mom holds baby girl.",
     "A delicious ceviche cheesecake slice",
 ]
 
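After this hunk the pipeline is compiled once for a fixed 1×256×256 shape, and infer() hard-codes the same size, so every request matches the statically reshaped graph. For reference, a minimal standalone sketch of the post-commit setup, assembled from the hunk above (the Gradio wiring that follows later in the file is omitted here):

# Sketch of app.py's model setup and infer() as of this commit; the
# OVStableDiffusionPipeline import comes from the hunk header, torch
# from the context line above.
from optimum.intel.openvino import OVStableDiffusionPipeline
import torch

model_id = "helenai/Linaqruf-anything-v3.0-ov"

# Load without compiling, pin the static shape, then compile once.
pipe = OVStableDiffusionPipeline.from_pretrained(model_id, compile=False)
pipe.reshape(batch_size=1, height=256, width=256, num_images_per_prompt=1)
pipe.compile()

def infer(prompt, negative_prompt):
    # width/height must equal the statically reshaped dimensions above.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=256,
        height=256,
    ).images[0]
    return image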
 
@@ -76,61 +61,6 @@ with gr.Blocks(css=css) as demo:
 
         result = gr.Image(label="Result", show_label=False)
 
-        with gr.Accordion("Advanced Settings", open=False):
-
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
-
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=256,
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=256,
-                )
-
-            with gr.Row():
-
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=3.0,
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=25,
-                    step=1,
-                    value=25,
-                )
-
         gr.Examples(
             examples = examples,
             inputs = [prompt]
@@ -138,7 +68,7 @@ with gr.Blocks(css=css) as demo:
 
         run_button.click(
             fn = infer,
-            inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+            inputs = [prompt, negative_prompt],
             outputs = [result]
         )
 
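With the Advanced Settings accordion gone, the click handler passes only the prompt and negative prompt into infer(). Below is a minimal sketch of how the remaining Blocks wiring fits together after this commit; the css string, the prompt textbox, run_button, and the surviving negative_prompt component are defined in parts of app.py outside these hunks, so their exact definitions here are assumptions (infer() is the function from the sketch above, and examples is the list updated in the first hunk):

import gradio as gr

# Updated examples list from the first hunk.
examples = [
    "A cute kitten, Japanese cartoon style.",
    "A sweet family, dad stands next to mom, mom holds baby girl.",
    "A delicious ceviche cheesecake slice",
]

css = ""  # assumed placeholder; the real stylesheet lives earlier in app.py

with gr.Blocks(css=css) as demo:
    # prompt, negative_prompt and run_button are not visible in this diff;
    # these definitions are assumptions kept deliberately minimal.
    prompt = gr.Text(label="Prompt", max_lines=1, placeholder="Enter your prompt")
    negative_prompt = gr.Text(label="Negative prompt", max_lines=1, placeholder="Enter a negative prompt")
    run_button = gr.Button("Run")

    result = gr.Image(label="Result", show_label=False)

    gr.Examples(
        examples=examples,
        inputs=[prompt],
    )

    run_button.click(
        fn=infer,
        inputs=[prompt, negative_prompt],
        outputs=[result],
    )

demo.launch()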
 
 