Profakerr committed · Commit 03839a8 · verified · 1 Parent(s): a854311

Update app.py

Files changed (1): app.py +107 -25
app.py CHANGED
@@ -3,23 +3,68 @@ from transformers import CLIPTextModel, CLIPTokenizer
 import torch
 import gradio as gr
 import spaces
+from huggingface_hub import hf_hub_download
+import os
+import requests
+import hashlib
+from pathlib import Path
+import re
 
+# Default LoRA for fallback
+DEFAULT_LORA = "OedoSoldier/detail-tweaker-lora"
+LORA_CACHE_DIR = "lora_cache"
 
-lora1 = "OedoSoldier/detail-tweaker-lora"
+def download_lora(url):
+    """Download LoRA file from Civitai URL and cache it locally"""
+    # Create cache directory if it doesn't exist
+    os.makedirs(LORA_CACHE_DIR, exist_ok=True)
+
+    # Generate a filename from the URL
+    url_hash = hashlib.md5(url.encode()).hexdigest()
+    local_path = os.path.join(LORA_CACHE_DIR, f"{url_hash}.safetensors")
+
+    # If file already exists in cache, return the path
+    if os.path.exists(local_path):
+        return local_path
+
+    # Download the file
+    try:
+        response = requests.get(url, stream=True)
+        response.raise_for_status()
+
+        # Get the total file size
+        total_size = int(response.headers.get('content-length', 0))
+
+        # Download and save the file
+        with open(local_path, 'wb') as f:
+            if total_size == 0:
+                f.write(response.content)
+            else:
+                for chunk in response.iter_content(chunk_size=8192):
+                    if chunk:
+                        f.write(chunk)
+
+        return local_path
+    except Exception as e:
+        print(f"Error downloading LoRA: {str(e)}")
+        return None
+
+def is_civitai_url(url):
+    """Check if the URL is a valid Civitai download URL"""
+    return bool(re.match(r'https?://civitai\.com/api/download/models/\d+', url))
 
 @spaces.GPU
-def generate_image(prompt, negative_prompt, num_inference_steps=30, guidance_scale=7.0,model="Real6.0",num_images=1, width=512, height=512):
-
+def generate_image(prompt, negative_prompt, lora_url, num_inference_steps=30, guidance_scale=7.0,
+                   model="Real6.0", num_images=1, width=512, height=512):
+
     if model == "Real5.0":
         model_id = "SG161222/Realistic_Vision_V5.0_noVAE"
-
     elif model == "Real5.1":
         model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
-
     else:
         model_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
 
-
+    # Initialize models
     vae = AutoencoderKL.from_pretrained(
         model_id,
         subfolder="vae"
@@ -47,13 +92,30 @@ def generate_image(prompt, negative_prompt, num_inference_steps=30, guidance_sca
         vae=vae
     ).to("cuda")
 
-    pipe.load_lora_weights(lora1)
+    # Load LoRA weights
+    try:
+        if lora_url and lora_url.strip():
+            if is_civitai_url(lora_url):
+                # Download and load Civitai LoRA
+                lora_path = download_lora(lora_url)
+                if lora_path:
+                    pipe.load_lora_weights(lora_path)
+                else:
+                    pipe.load_lora_weights(DEFAULT_LORA)
+            # If it's a HuggingFace repo path
+            elif '/' in lora_url and not lora_url.startswith('http'):
+                pipe.load_lora_weights(lora_url)
+            else:
+                pipe.load_lora_weights(DEFAULT_LORA)
+        else:
+            pipe.load_lora_weights(DEFAULT_LORA)
+    except Exception as e:
+        print(f"Error loading LoRA weights: {str(e)}")
+        pipe.load_lora_weights(DEFAULT_LORA)
 
     if model == "Real6.0":
         pipe.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
 
-
-
     pipe.scheduler = DPMSolverMultistepScheduler.from_config(
         pipe.scheduler.config,
         algorithm_type="dpmsolver++",
@@ -79,7 +141,6 @@ def generate_image(prompt, negative_prompt, num_inference_steps=30, guidance_sca
     prompt_embeds = text_encoder(text_inputs.input_ids)[0]
     negative_prompt_embeds = text_encoder(negative_text_inputs.input_ids)[0]
 
-
     # Generate the image
     result = pipe(
         prompt_embeds=prompt_embeds,
@@ -94,6 +155,17 @@ def generate_image(prompt, negative_prompt, num_inference_steps=30, guidance_sca
 
     return result.images
 
+def clean_lora_cache():
+    """Clean the LoRA cache directory"""
+    if os.path.exists(LORA_CACHE_DIR):
+        for file in os.listdir(LORA_CACHE_DIR):
+            file_path = os.path.join(LORA_CACHE_DIR, file)
+            try:
+                if os.path.isfile(file_path):
+                    os.unlink(file_path)
+            except Exception as e:
+                print(f"Error deleting {file_path}: {str(e)}")
+
 title = """<h1 align="center">ProFaker</h1>"""
 # Create the Gradio interface
 with gr.Blocks() as demo:
@@ -112,28 +184,34 @@ with gr.Blocks() as demo:
                 info="Enter what you don't want in Image...",
                 lines=3
             )
+            lora_input = gr.Textbox(
+                label="LoRA URL/Path",
+                info="Enter Civitai download URL or HuggingFace path (e.g., 'username/model-name')",
+                value=DEFAULT_LORA
+            )
+            clear_cache = gr.Button("Clear LoRA Cache")
            generate_button = gr.Button("Generate Image")
+
            with gr.Accordion("Advanced Options", open=False):
-
                model = gr.Dropdown(
                    choices=["Real6.0","Real5.1","Real5.0"],
                    value="Real6.0",
                    label="Model",
                )
 
-                num_images = gr.Slider( # New slider for number of images
-                minimum=1,
-                maximum=4,
-                value=1,
-                step=1,
-                label="Number of Images to Generate"
+                num_images = gr.Slider(
+                    minimum=1,
+                    maximum=4,
+                    value=1,
+                    step=1,
+                    label="Number of Images to Generate"
                )
                width = gr.Slider(
-                minimum=256,
-                maximum=1024,
-                value=512,
-                step=64,
-                label="Image Width"
+                    minimum=256,
+                    maximum=1024,
+                    value=512,
+                    step=64,
+                    label="Image Width"
                )
                height = gr.Slider(
                    minimum=256,
@@ -156,6 +234,7 @@ with gr.Blocks() as demo:
                    step=0.5,
                    label="Guidance Scale"
                )
+
        with gr.Column():
            # Output component
            gallery = gr.Gallery(
@@ -165,13 +244,16 @@ with gr.Blocks() as demo:
                columns=2,
                rows=2
            )
-
 
    # Connect the interface to the generation function
    generate_button.click(
        fn=generate_image,
-        inputs=[prompt, negative_prompt, steps_slider, guidance_slider, model, num_images, width, height],
+        inputs=[prompt, negative_prompt, lora_input, steps_slider, guidance_slider,
+                model, num_images, width, height],
        outputs=gallery
    )
+
+    # Connect clear cache button
+    clear_cache.click(fn=clean_lora_cache)
 
-demo.queue(max_size=10).launch(share=False)
+demo.queue(max_size=10).launch(share=False)
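
A quick way to review the new routing without GPU access: the commit picks between a Civitai download, a Hugging Face repo id, and the default LoRA purely from the shape of the input string, so that branch logic can be checked standalone. The sketch below mirrors the order used inside generate_image; resolve_lora is a hypothetical helper written for this note (it is not in app.py), the download step is stubbed out to a tag, and the Civitai model id 12345 is a placeholder.

import re

DEFAULT_LORA = "OedoSoldier/detail-tweaker-lora"

def is_civitai_url(url):
    # Same check as app.py: only Civitai API download URLs match
    return bool(re.match(r'https?://civitai\.com/api/download/models/\d+', url))

def resolve_lora(lora_url):
    # Hypothetical stand-in mirroring the fallback order in generate_image;
    # the real code calls download_lora() and pipe.load_lora_weights() instead.
    if lora_url and lora_url.strip():
        if is_civitai_url(lora_url):
            return ("civitai", lora_url)      # app.py would download_lora(url) here
        if '/' in lora_url and not lora_url.startswith('http'):
            return ("huggingface", lora_url)  # passed straight to load_lora_weights
    return ("default", DEFAULT_LORA)

assert resolve_lora("https://civitai.com/api/download/models/12345")[0] == "civitai"
assert resolve_lora("username/model-name")[0] == "huggingface"
assert resolve_lora("")[0] == "default"
assert resolve_lora("https://example.com/file.safetensors")[0] == "default"

One consequence worth noting for reviewers: download_lora caches files under lora_cache/ keyed by the md5 of the URL, so repeated generations with the same Civitai link reuse the local copy until the new Clear LoRA Cache button (wired to clean_lora_cache) deletes it.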