wifix199 committed on
Commit
cc63412
1 Parent(s): 606bbe6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -15
app.py CHANGED
@@ -2,23 +2,33 @@ import torch
2
  from diffusers import StableDiffusionPipeline
3
  import gradio as gr
4
 
 
5
  model_id = "SG161222/RealVisXL_V4.0"
6
  pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
7
  pipe.to("cpu") # Use "cuda" if GPU is available
8
 
9
- def generate_image(
10
- prompt: str,
11
- negative_prompt: str = "",
12
- use_negative_prompt: bool = False,
13
- style: str = DEFAULT_STYLE_NAME,
14
- seed: int = 0,
15
- width: int = 1024,
16
- height: int = 1024,
17
- guidance_scale: float = 3,
18
- randomize_seed: bool = False,
19
- use_resolution_binning: bool = True,
20
- progress=gr.Progress(track_tqdm=True),
21
- ):
 
 
 
 
 
 
 
 
 
22
  if check_text(prompt, negative_prompt):
23
  raise ValueError("Prompt contains restricted words.")
24
 
@@ -27,7 +37,7 @@ def generate_image(
27
  generator = torch.Generator().manual_seed(seed)
28
 
29
  if not use_negative_prompt:
30
- negative_prompt = "" # type: ignore
31
  negative_prompt += default_negative
32
 
33
  options = {
@@ -43,7 +53,7 @@ def generate_image(
43
  "output_type": "pil",
44
  }
45
 
46
- images = pipe(**options).images + pipe2(**options).images
47
 
48
  image_paths = [save_image(img) for img in images]
49
  return image_paths, seed
 
2
  from diffusers import StableDiffusionPipeline
3
  import gradio as gr
4
 
5
+ # Load the model
6
  model_id = "SG161222/RealVisXL_V4.0"
7
  pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
8
  pipe.to("cpu") # Use "cuda" if GPU is available
9
 
10
# Placeholder configuration used by the generation pipeline.
# DEFAULT_STYLE_NAME: style preset applied when the caller gives none.
# default_negative: extra negative-prompt text appended to every request.
# NUM_IMAGES_PER_PROMPT: how many images one prompt should produce.
DEFAULT_STYLE_NAME = "default"
default_negative = ""
NUM_IMAGES_PER_PROMPT = 1
15
def check_text(prompt, negative_prompt):
    """Placeholder content filter.

    Always reports the prompt as allowed (returns False). Replace with a
    real restricted-word check before deploying.
    """
    # No filtering implemented yet — nothing is ever flagged.
    flagged = False
    return flagged
18
+
19
def apply_style(style, prompt, negative_prompt):
    """Placeholder style hook.

    Intended to rewrite *prompt*/*negative_prompt* according to *style*;
    currently passes both through unchanged.
    """
    # TODO: apply the selected style template to the prompts.
    return prompt, negative_prompt
22
+
23
def randomize_seed_fn(seed, randomize_seed):
    """Placeholder seed selector.

    Intended to return a fresh random seed when *randomize_seed* is true;
    currently echoes *seed* back regardless of the flag.
    """
    # TODO: draw a new random seed when randomize_seed is True.
    chosen = seed
    return chosen
26
+
27
def save_image(image):
    """Placeholder image persister.

    Intended to write *image* to disk and return the saved path; currently
    returns the image object itself, untouched.
    """
    # TODO: persist to a file and return the path instead of the object.
    return image
30
+
31
+ def generate_image(prompt, negative_prompt="", use_negative_prompt=False, style=DEFAULT_STYLE_NAME, seed=0, width=1024, height=1024, guidance_scale=3, randomize_seed=False, use_resolution_binning=True, progress=gr.Progress(track_tqdm=True)):
32
  if check_text(prompt, negative_prompt):
33
  raise ValueError("Prompt contains restricted words.")
34
 
 
37
  generator = torch.Generator().manual_seed(seed)
38
 
39
  if not use_negative_prompt:
40
+ negative_prompt = ""
41
  negative_prompt += default_negative
42
 
43
  options = {
 
53
  "output_type": "pil",
54
  }
55
 
56
+ images = pipe(**options).images
57
 
58
  image_paths = [save_image(img) for img in images]
59
  return image_paths, seed