gaur3009 commited on
Commit
b7ce479
·
verified ·
1 Parent(s): 59929aa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -47
app.py CHANGED
@@ -1,59 +1,32 @@
1
  import gradio as gr
2
- import requests
3
- import os
4
  from PIL import Image
5
- from io import BytesIO
6
- from tqdm import tqdm
7
- import time
8
 
9
- repo = "artificialguybr/TshirtDesignRedmond-V2"
 
 
 
10
 
11
  def infer(color_prompt, dress_type_prompt, design_prompt):
12
- # Improved prompt for higher accuracy
13
  prompt = (
14
  f"A high-quality digital image of a {color_prompt} {dress_type_prompt}, "
15
- f"featuring a {design_prompt} printed in sharp detail printedon the {dress_type_prompt},"
16
- f"facing front, hanging on he plain wall"
17
- f"The fabric has realistic texture,"
18
- f"smooth folds, and accurate lighting. The design is perfectly aligned, with natural shadows "
19
- f"and highlights, creating a photorealistic look."
20
  )
21
-
22
  print("Generating image with prompt:", prompt)
23
- api_url = f"https://api-inference.huggingface.co/models/{repo}"
24
-
25
- headers = {} # If API token needed, add here
26
-
27
- payload = {
28
- "inputs": prompt,
29
- "parameters": {
30
- # Optimized negative prompt
31
- "negative_prompt": "low quality, artifacts, distorted, blurry, overexposed, underexposed, unrealistic texture, poor lighting, misaligned print, plastic-like fabric, grainy, washed-out colors, 3D render, cartoon, digital art, watermark, bad anatomy, malformed, cluttered design",
32
- "num_inference_steps": 30,
33
- "scheduler": "EulerAncestralDiscreteScheduler" # Faster & more accurate scheduler
34
- },
35
- }
36
 
37
- error_count = 0
38
- pbar = tqdm(total=None, desc="Loading model")
39
-
40
- while True:
41
- print("Sending request to API...")
42
- response = requests.post(api_url, headers=headers, json=payload)
43
- print("API response status code:", response.status_code)
44
-
45
- if response.status_code == 200:
46
- print("Image generation successful!")
47
- return Image.open(BytesIO(response.content))
48
- elif response.status_code == 503:
49
- time.sleep(1)
50
- pbar.update(1)
51
- elif response.status_code == 500 and error_count < 5:
52
- time.sleep(1)
53
- error_count += 1
54
- else:
55
- print("API Error:", response.status_code)
56
- raise Exception(f"API Error: {response.status_code}")
57
 
58
  # Gradio Interface
59
  iface = gr.Interface(
@@ -70,4 +43,4 @@ iface = gr.Interface(
70
  )
71
 
72
  print("Launching Gradio interface...")
73
- iface.launch()
 
import gradio as gr
from diffusers import DiffusionPipeline
import torch
from PIL import Image
import tempfile


print("Loading Rookus T1 model...")
# float16 weights are only safe on GPU: many CPU ops lack Half kernels,
# so choose the dtype to match the device we will actually run on.
# (Previously float16 was forced even on CPU, which crashes at inference.)
_device = "cuda" if torch.cuda.is_available() else "cpu"
_dtype = torch.float16 if _device == "cuda" else torch.float32
pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=_dtype)
pipe.to(_device)
# Silence per-step progress bars in the server logs.
pipe.set_progress_bar_config(disable=True)
12
  def infer(color_prompt, dress_type_prompt, design_prompt):
13
+ # Construct prompt
14
  prompt = (
15
  f"A high-quality digital image of a {color_prompt} {dress_type_prompt}, "
16
+ f"featuring a {design_prompt} printed in sharp detail on the {dress_type_prompt}, "
17
+ f"facing front, hanging on a plain wall. "
18
+ f"The fabric has realistic texture, smooth folds, and accurate lighting. "
19
+ f"The design is perfectly aligned, with natural shadows and highlights, "
20
+ f"creating a photorealistic look."
21
  )
22
+
23
  print("Generating image with prompt:", prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
+ # Generate image
26
+ result = pipe(prompt, num_inference_steps=4, guidance_scale=0.0) # SDXL-Turbo is optimized for fast steps
27
+ image = result.images[0]
28
+
29
+ return image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
  # Gradio Interface
32
  iface = gr.Interface(
 
43
  )
44
 
45
  print("Launching Gradio interface...")
46
+ iface.launch()