Update app.py
Browse files
app.py
CHANGED
@@ -1,59 +1,32 @@
|
|
1 |
import gradio as gr
|
2 |
-
import
|
3 |
-
import
|
4 |
from PIL import Image
|
5 |
-
|
6 |
-
from tqdm import tqdm
|
7 |
-
import time
|
8 |
|
9 |
-
|
|
|
|
|
|
|
10 |
|
11 |
def infer(color_prompt, dress_type_prompt, design_prompt):
    """Generate a dress image via the Hugging Face Inference API.

    Args:
        color_prompt: Color of the garment (e.g. "red").
        dress_type_prompt: Garment type (e.g. "t-shirt").
        design_prompt: Description of the print/design on the garment.

    Returns:
        PIL.Image.Image: the generated image.

    Raises:
        Exception: on a non-retryable API status code, or after the
            retry budget for HTTP 500 responses is exhausted.
    """
    prompt = (
        f"A high-quality digital image of a {color_prompt} {dress_type_prompt}, "
        f"featuring a {design_prompt} printed in sharp detail on the {dress_type_prompt}, "
        f"facing front, hanging on a plain wall. "
        f"The fabric has realistic texture, smooth folds, and accurate lighting. "
        f"The design is perfectly aligned, with natural shadows and highlights, "
        f"creating a photorealistic look."
    )
    print("Generating image with prompt:", prompt)

    # NOTE(review): `repo` is expected to be defined at module level — confirm.
    api_url = f"https://api-inference.huggingface.co/models/{repo}"
    headers = {}  # If API token needed, add here

    payload = {
        "inputs": prompt,
        "parameters": {
            # Optimized negative prompt
            "negative_prompt": "low quality, artifacts, distorted, blurry, overexposed, underexposed, unrealistic texture, poor lighting, misaligned print, plastic-like fabric, grainy, washed-out colors, 3D render, cartoon, digital art, watermark, bad anatomy, malformed, cluttered design",
            "num_inference_steps": 30,
            "scheduler": "EulerAncestralDiscreteScheduler",  # Faster & more accurate scheduler
        },
    }

    # Fix: the original had no loop around the request, so the 503 ("model
    # loading") and 500 retry branches fell through and the function
    # silently returned None; it also referenced an undefined `pbar` and an
    # uninitialized `error_count`. Retry explicitly instead.
    error_count = 0
    while True:
        response = requests.post(api_url, headers=headers, json=payload)
        print("API response status code:", response.status_code)

        if response.status_code == 200:
            print("Image generation successful!")
            return Image.open(BytesIO(response.content))
        elif response.status_code == 503:
            # Model is still loading on the server side — wait and retry.
            time.sleep(1)
        elif response.status_code == 500 and error_count < 5:
            # Transient server error — retry up to 5 times.
            time.sleep(1)
            error_count += 1
        else:
            print("API Error:", response.status_code)
            raise Exception(f"API Error: {response.status_code}")
|
57 |
|
58 |
# Gradio Interface
|
59 |
iface = gr.Interface(
|
@@ -70,4 +43,4 @@ iface = gr.Interface(
|
|
70 |
)
|
71 |
|
72 |
print("Launching Gradio interface...")
|
73 |
-
iface.launch()
|
|
|
1 |
import gradio as gr
|
2 |
+
from diffusers import DiffusionPipeline
|
3 |
+
import torch
|
4 |
from PIL import Image
|
5 |
+
import tempfile
|
|
|
|
|
6 |
|
7 |
+
# Load the SDXL-Turbo pipeline once at import time and move it to the best
# available device; its internal progress bar is disabled to keep server
# logs clean.
print("Loading Rookus T1 model...")
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/sdxl-turbo",
    torch_dtype=torch.float16,
)
if torch.cuda.is_available():
    _device = "cuda"
else:
    _device = "cpu"
pipe.to(_device)
pipe.set_progress_bar_config(disable=True)
|
11 |
|
12 |
def infer(color_prompt, dress_type_prompt, design_prompt):
    """Render a photorealistic garment image from the three text inputs.

    Args:
        color_prompt: Color of the garment (e.g. "red").
        dress_type_prompt: Garment type (e.g. "t-shirt").
        design_prompt: Description of the print/design on the garment.

    Returns:
        The first image produced by the module-level `pipe`.
    """
    # Assemble the full prompt from the user-supplied fragments.
    fragments = [
        f"A high-quality digital image of a {color_prompt} {dress_type_prompt}, ",
        f"featuring a {design_prompt} printed in sharp detail on the {dress_type_prompt}, ",
        "facing front, hanging on a plain wall. ",
        "The fabric has realistic texture, smooth folds, and accurate lighting. ",
        "The design is perfectly aligned, with natural shadows and highlights, ",
        "creating a photorealistic look.",
    ]
    prompt = "".join(fragments)

    print("Generating image with prompt:", prompt)

    # SDXL-Turbo is tuned for very few steps with no classifier-free guidance.
    generated = pipe(prompt, num_inference_steps=4, guidance_scale=0.0)
    return generated.images[0]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
30 |
|
31 |
# Gradio Interface
|
32 |
iface = gr.Interface(
|
|
|
43 |
)
|
44 |
|
45 |
# Start the Gradio web server; launch() blocks until the app is stopped.
print("Launching Gradio interface...")
iface.launch()
|