Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -2,65 +2,20 @@ import torch
|
|
2 |
from diffusers import StableDiffusionPipeline, DiffusionPipeline
import gradio as gr

# Load the model once at import time.
#
# NOTE(review): "SG161222/RealVisXL_V4.0" is an SDXL checkpoint (note the
# 1024x1024 defaults used below).  StableDiffusionPipeline is the SD 1.x
# class and cannot load SDXL weights; DiffusionPipeline.from_pretrained
# resolves the correct pipeline class from the repo's model_index.json.
model_id = "SG161222/RealVisXL_V4.0"

# float16 is only reliably supported on GPU; fall back to float32 on CPU
# (the original forced float16 and then moved the pipe to "cpu", which
# breaks or silently degrades many CPU ops).
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe.to(device)

# Negative-prompt text appended to every request (empty by default).
default_negative = ""
# How many images the pipeline produces per prompt.
NUM_IMAGES_PER_PROMPT = 1
|
14 |
-
|
15 |
-
def check_text(prompt, negative_prompt):
    """Placeholder content filter.

    Approves every prompt/negative-prompt pair for now; a real
    implementation should return True when the text must be blocked.
    """
    return False
|
18 |
-
|
19 |
-
def apply_style(style, prompt, negative_prompt):
    """Placeholder style hook.

    A real implementation would rewrite the prompts according to the
    selected style preset; for now the pair passes through untouched.
    """
    return prompt, negative_prompt
|
22 |
-
|
23 |
-
def randomize_seed_fn(seed, randomize_seed):
    """Return a fresh random seed when *randomize_seed* is True.

    The original stub ignored the flag and always returned *seed*
    unchanged, so the "randomize seed" UI toggle had no effect.

    Args:
        seed: The seed supplied by the caller/UI.
        randomize_seed: When True, discard *seed* and draw a new one.

    Returns:
        int-compatible seed value in [0, 2**32 - 1] when randomized,
        otherwise *seed* unchanged.
    """
    import random  # local import: keeps this helper self-contained

    if randomize_seed:
        seed = random.randint(0, 2**32 - 1)
    return seed
|
26 |
-
|
27 |
-
def save_image(image):
    """Placeholder persistence hook.

    Currently a no-op that hands the image straight back.  Replace with
    real save-to-disk logic (returning a file path) if persistence is
    needed — note callers treat the result as an "image path".
    """
    return image
|
|
|
|
|
|
|
30 |
|
31 |
-
def generate_image(prompt, negative_prompt="", use_negative_prompt=False, style="(No style)", seed=0, width=1024, height=1024, guidance_scale=3, randomize_seed=False, use_resolution_binning=True, progress=gr.Progress(track_tqdm=True)):
    """Generate image(s) for *prompt* with the global diffusion pipeline.

    Fix: the original default ``style=DEFAULT_STYLE_NAME`` referenced a
    name defined nowhere in the file, so the module raised NameError at
    import time; ``"(No style)"`` is the conventional no-op style key.

    Args:
        prompt: Text prompt to render.
        negative_prompt: Text to steer away from (used only when
            use_negative_prompt is True).
        use_negative_prompt: Gate for *negative_prompt*.
        style: Style preset name passed to apply_style().
        seed: RNG seed; replaced when *randomize_seed* is True.
        width, height: Output resolution in pixels.
        guidance_scale: Classifier-free guidance strength.
        randomize_seed: When True, draw a fresh seed.
        use_resolution_binning: Forwarded to the pipeline.
        progress: Gradio progress tracker (tqdm-linked).

    Returns:
        (image_paths, seed): the saved images and the seed actually used.

    Raises:
        ValueError: if check_text() flags the prompt as restricted.
    """
    if check_text(prompt, negative_prompt):
        raise ValueError("Prompt contains restricted words.")

    prompt, negative_prompt = apply_style(style, prompt, negative_prompt)
    seed = int(randomize_seed_fn(seed, randomize_seed))
    generator = torch.Generator().manual_seed(seed)

    # The user-supplied negative prompt only applies when explicitly
    # enabled; the module-wide default negative text is always appended.
    if not use_negative_prompt:
        negative_prompt = ""
    negative_prompt += default_negative

    options = {
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "width": width,
        "height": height,
        "guidance_scale": guidance_scale,
        "num_inference_steps": 25,
        "generator": generator,
        "num_images_per_prompt": NUM_IMAGES_PER_PROMPT,
        "use_resolution_binning": use_resolution_binning,
        "output_type": "pil",
    }

    images = pipe(**options).images

    image_paths = [save_image(img) for img in images]
    return image_paths, seed
|
60 |
|
61 |
-
def chatbot(prompt):
    """Generate an image for the user's chat prompt.

    Fix: generate_image() returns the tuple ``(image_paths, seed)``, but
    the original returned that whole tuple as if it were a single image,
    which an image output component cannot render.  Unpack it and return
    the first (and, with NUM_IMAGES_PER_PROMPT == 1, only) image.
    """
    image_paths, _seed = generate_image(prompt)
    return image_paths[0]
|
65 |
|
66 |
# Create the Gradio interface
|
@@ -73,4 +28,4 @@ interface = gr.Interface(
|
|
73 |
)
|
74 |
|
75 |
# Launch the interface
|
76 |
-
interface.launch()
|
|
|
2 |
from diffusers import StableDiffusionPipeline, DiffusionPipeline
import gradio as gr

# NOTE(review): "SG161222/RealVisXL_V4.0" is an SDXL checkpoint;
# StableDiffusionPipeline is the SD 1.x class and cannot load SDXL
# weights.  DiffusionPipeline.from_pretrained resolves the correct
# pipeline class from the repo's model_index.json.
model_id = "SG161222/RealVisXL_V4.0"

# float16 is only reliably supported on GPU; use float32 on CPU (the
# original forced float16 and then moved the pipeline to "cpu").
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe.to(device)
|
8 |
|
9 |
+
def generate_image(prompt):
    """Run the global text-to-image pipeline on *prompt*.

    Returns the first generated PIL image.
    """
    result = pipe(prompt)
    return result.images[0]
|
12 |
+
def chatbot(prompt):
    """Chat handler: turn the user's text prompt into a generated image."""
    # Delegate straight to the generation helper and hand its image back.
    return generate_image(prompt)
|
20 |
|
21 |
# Create the Gradio interface
|
|
|
28 |
)
|
29 |
|
30 |
# Launch the interface
|
31 |
+
interface.launch()
|