Spaces: Running on Zero
Upload 12 files
- app.py +5 -4
- dc.py +7 -3
- modutils.py +11 -0
- requirements.txt +3 -4
- tagger/tagger.py +12 -21
app.py
CHANGED
@@ -51,8 +51,9 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         prompt = gr.Text(label="Prompt", show_label=False, lines=1, max_lines=8, placeholder="Enter your prompt", container=False)

         with gr.Row():
-            run_button = gr.Button("Run")
-            run_translate_button = gr.Button("
+            run_button = gr.Button("Run", variant="primary", scale=5)
+            run_translate_button = gr.Button("Run with LLM Enhance", variant="secondary", scale=3)
+            auto_trans = gr.Checkbox(label="Auto translate to English", value=True, scale=2)

         result = gr.Image(label="Result", elem_id="result", format="png", show_label=False, interactive=False,
                           show_download_button=True, show_share_button=False, container=True)
@@ -174,7 +175,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                 guidance_scale, num_inference_steps, model_name,
                 lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-                sampler, vae_model],
+                sampler, vae_model, auto_trans],
         outputs=[result],
         queue=True,
         show_progress="full",
@@ -187,7 +188,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                 guidance_scale, num_inference_steps, model_name,
                 lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-                sampler, vae_model],
+                sampler, vae_model, auto_trans],
         outputs=[result],
         queue=False,
         show_api=True,
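The new checkbox travels to the handler as one more positional input, so the backend sees its checked state as a plain bool alongside the sampler and VAE choices. A minimal sketch of that wiring pattern, with a stub standing in for dc.py's real infer():

import gradio as gr

# Stub standing in for dc.py's infer(); only the trailing bool matters here.
def infer_stub(prompt, translate):
    return f"translate={translate}: {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt")
    with gr.Row():
        run_button = gr.Button("Run", variant="primary", scale=5)
        auto_trans = gr.Checkbox(label="Auto translate to English", value=True, scale=2)
    result = gr.Text(label="Result")
    # Appending the checkbox to `inputs` is the entire change on the UI side:
    # Gradio passes its checked state through as a positional argument.
    run_button.click(infer_stub, inputs=[prompt, auto_trans], outputs=[result])

if __name__ == "__main__":
    demo.launch()
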
dc.py
CHANGED
@@ -697,14 +697,14 @@ from pathlib import Path
 from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
     get_local_model_list, get_private_lora_model_lists, get_valid_lora_name,
     get_valid_lora_path, get_valid_lora_wt, get_lora_info,
-    normalize_prompt_list, get_civitai_info, search_lora_on_civitai)
+    normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en)

 sd_gen = GuiSD()
 #@spaces.GPU
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
           model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
           lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
-          sampler = "Euler a", vae = None, progress=gr.Progress(track_tqdm=True)):
+          sampler = "Euler a", vae = None, translate=True, progress=gr.Progress(track_tqdm=True)):
     import PIL
     import numpy as np
     MAX_SEED = np.iinfo(np.int32).max
@@ -718,6 +718,10 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance

     generator = torch.Generator().manual_seed(seed).seed()

+    if translate:
+        prompt = translate_to_en(prompt)
+        negative_prompt = translate_to_en(negative_prompt)
+
     prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name)
     progress(0.5, desc="Preparing...")
     lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
@@ -752,7 +756,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
 def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
           model_name = load_diffusers_format_model[0], lora1 = None, lora1_wt = 1.0, lora2 = None, lora2_wt = 1.0,
           lora3 = None, lora3_wt = 1.0, lora4 = None, lora4_wt = 1.0, lora5 = None, lora5_wt = 1.0,
-          sampler = "Euler a", vae = None, progress=gr.Progress(track_tqdm=True)):
+          sampler = "Euler a", vae = None, translate = True, progress=gr.Progress(track_tqdm=True)):
     return gr.update(visible=True)
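Ordering matters in the new block: translation runs before insert_model_recom_prompt, so the model-specific boilerplate is appended to already-English text rather than being sent through the translator. A minimal sketch of that sequencing, with stand-ins for both helpers (the bodies here are illustrative, not the real implementations):

def translate_to_en(text: str) -> str:
    # Stand-in for the modutils helper shown in the next diff.
    return text.replace("猫", "cat")

def insert_model_recom_prompt(prompt: str, negative_prompt: str, model_name: str):
    # Stand-in: prepends fixed quality tags, as the real helper is assumed to do.
    return f"masterpiece, {prompt}", f"lowres, {negative_prompt}"

def prepare(prompt, negative_prompt, model_name, translate=True):
    # Mirrors the guard added to infer(): translate first, decorate second.
    if translate:
        prompt = translate_to_en(prompt)
        negative_prompt = translate_to_en(negative_prompt)
    return insert_model_recom_prompt(prompt, negative_prompt, model_name)

print(prepare("猫, solo", "blurry", "any-model"))
# -> ('masterpiece, cat, solo', 'lowres, blurry')
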
modutils.py
CHANGED
@@ -27,6 +27,17 @@ def list_sub(a, b):
     return [e for e in a if e not in b]


+from translatepy import Translator
+translator = Translator()
+def translate_to_en(input: str):
+    try:
+        output = str(translator.translate(input, 'English'))
+    except Exception as e:
+        output = input
+        print(e)
+    return output
+
+
 def get_local_model_list(dir_path):
     model_list = []
     valid_extensions = ('.ckpt', '.pt', '.pth', '.safetensors', '.bin')
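translatepy's Translator.translate returns a TranslationResult object rather than a string, which is why the helper wraps the call in str(); on any failure the raw input is returned, so generation never stalls on a translation error. Usage is a one-liner:

from translatepy import Translator

translator = Translator()  # aggregates several free translation services

def translate_to_en(text: str) -> str:
    try:
        # str() unwraps the TranslationResult into plain translated text.
        return str(translator.translate(text, 'English'))
    except Exception as e:
        print(e)   # log, then fall back to the untranslated input
        return text

print(translate_to_en("una chica con sombrero rojo"))  # roughly "a girl with a red hat"
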
requirements.txt
CHANGED
@@ -17,8 +17,7 @@ rapidfuzz
 torchvision
 optimum[onnxruntime]
 dartrs
-
-httpcore
-googletrans==4.0.0rc1
+translatepy
 timm
-wrapt-timeout-decorator
+wrapt-timeout-decorator
+sentencepiece
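A quick import check confirms the swap; note that wrapt-timeout-decorator installs as the module wrapt_timeout_decorator, and sentencepiece is presumably pulled in for a tokenizer used elsewhere in the Space:

import importlib

# googletrans (and its httpcore pin) should now be absent; the rest present.
for name in ("translatepy", "timm", "wrapt_timeout_decorator", "sentencepiece"):
    importlib.import_module(name)
    print(f"{name}: OK")
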
tagger/tagger.py
CHANGED
@@ -1,11 +1,8 @@
+import spaces
 from PIL import Image
 import torch
 import gradio as gr
-import
-from transformers import (
-    AutoImageProcessor,
-    AutoModelForImageClassification,
-)
+from transformers import AutoImageProcessor, AutoModelForImageClassification
 from pathlib import Path


@@ -190,18 +187,16 @@ def convert_danbooru_to_e621_prompt(input_prompt: str = "", prompt_type: str = "
     return output_prompt


+from translatepy import Translator
+translator = Translator()
 def translate_prompt(prompt: str = ""):
-    def translate_to_english(
-        import httpcore
-        setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
-        from googletrans import Translator
-        translator = Translator()
+    def translate_to_english(input: str):
         try:
-
-            return translated_prompt
+            output = str(translator.translate(input, 'English'))
         except Exception as e:
+            output = input
             print(e)
-
+        return output

     def is_japanese(s):
         import unicodedata
@@ -224,17 +219,13 @@ def translate_prompt(prompt: str = ""):


 def translate_prompt_to_ja(prompt: str = ""):
-    def translate_to_japanese(
-        import httpcore
-        setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
-        from googletrans import Translator
-        translator = Translator()
+    def translate_to_japanese(input: str):
         try:
-
-            return translated_prompt
+            output = str(translator.translate(input, 'Japanese'))
         except Exception as e:
+            output = input
             print(e)
-
+        return output

     def is_japanese(s):
         import unicodedata
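Both tagger helpers now share the single module-level Translator instead of constructing a googletrans client (plus the httpcore.SyncHTTPTransport monkey-patch) on every call, and both keep the same fallback contract: on failure, return the input untouched. The round-trip pattern, condensed into a standalone sketch (the _translate wrapper is illustrative, not a function in the diff):

from translatepy import Translator

translator = Translator()  # shared by both translation directions

def _translate(text: str, target: str) -> str:
    # Same fallback contract as the diff: on failure, return the input as-is.
    try:
        return str(translator.translate(text, target))
    except Exception as e:
        print(e)
        return text

tags = "1girl, solo, looking at viewer"
ja = _translate(tags, "Japanese")   # prompt shown to Japanese users
print(ja)
print(_translate(ja, "English"))    # normalized back to English for tagging
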