Spaces: Runtime error

Update app.py

app.py CHANGED
@@ -13,11 +13,11 @@ import base64
 
 
 def flip_text(prompt, negative_prompt, task, steps, sampler, cfg_scale, seed):
-    result = {"prompt": prompt,"negative_prompt": negative_prompt,"task": task,"steps": steps,"sampler": sampler,"cfg_scale": cfg_scale,"seed": seed}
+    result = {"prompt": prompt, "negative_prompt": negative_prompt, "task": task, "steps": steps, "sampler": sampler, "cfg_scale": cfg_scale, "seed": seed}
     print(result)
 
     language = detect(prompt)
-
+
     if language == 'ru':
         prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
         print(prompt)
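Aside on the translation step: the imports behind detect() and GoogleTranslator() sit above this hunk and are not shown in the diff. A self-contained sketch of the same logic, assuming the names come from the langdetect and deep_translator packages (an assumption based on the call signatures; the helper maybe_translate is purely illustrative):

from langdetect import detect                      # assumed source of detect()
from deep_translator import GoogleTranslator       # assumed source of GoogleTranslator

def maybe_translate(prompt: str) -> str:
    # Mirror the logic in flip_text: if the prompt is detected as Russian,
    # translate it to English before it is sent to the image model.
    if detect(prompt) == 'ru':
        prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    return prompt

print(maybe_translate("рыжий кот спит на крыше"))  # prints an English translation of the prompt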
@@ -25,37 +25,37 @@ def flip_text(prompt, negative_prompt, task, steps, sampler, cfg_scale, seed):
     cfg = int(cfg_scale)
     steps = int(steps)
     seed = int(seed)
-    url_sd1 =
-    url_sd2 =
-    url_sd3 =
+    url_sd1 = "https://stable-diffusion-open.api.replicate.com/infer"
+    url_sd2 = "https://api.replicate.com/predictions/10720/jobs/"
+    url_sd3 = "https://stable-diffusion-open.api.replicate.com/predictions/10720/output/"
 
     if task == 'Realistic Vision 5.0':
-        model = '
+        model = 'Realistic Vision V5.0.safetensors+%5B614d1063%5D'
-
+    elif task == 'Dreamshaper 8':
         model = 'dreamshaper_8.safetensors+%5B9d40847d%5D'
-
+    elif task == 'Deliberate 3':
         model = 'deliberate_v3.safetensors+%5Bafd9d2d4%5D'
-
+    elif task == 'Analog Diffusion':
         model = 'analog-diffusion-1.0.ckpt+%5B9ca13f02%5D'
-
+    elif task == 'Lyriel 1.6':
         model = 'lyriel_v16.safetensors+%5B68fceea2%5D'
-
+    elif task == "Elldreth's Vivid Mix":
         model = 'elldreths-vivid-mix.safetensors+%5B342d9d26%5D'
-
+    elif task == 'Anything V5':
         model = 'anything-v4.5-pruned.ckpt+%5B65745d25%5D'
-
+    elif task == 'Openjourney V4':
         model = 'openjourney_V4.ckpt+%5Bca2f377f%5D'
-
+    elif task == 'AbsoluteReality 1.8.1':
         model = 'absolutereality_v181.safetensors+%5B3d9d4d2b%5D'
-
+    elif task == 'epiCRealism v5':
         model = 'epicrealism_naturalSinRC1VAE.safetensors+%5B90a4c676%5D'
-
+    elif task == 'CyberRealistic 3.3':
         model = 'cyberrealistic_v33.safetensors+%5B82b0d085%5D'
-
+    elif task == 'ToonYou 6':
         model = 'toonyou_beta6.safetensors+%5B980f6b15%5D'
 
     c = 0
-    r = requests.get(f'{url_sd1}{prompt}&model={model}&negative_prompt={negative_prompt}&steps={steps}&cfg={cfg}&seed={seed}&sampler={sampler}&aspect_ratio=square', timeout=10)
+    r = requests.get(f'{url_sd1}?prompt={prompt}&model={model}&negative_prompt={negative_prompt}&steps={steps}&cfg={cfg}&seed={seed}&sampler={sampler}&aspect_ratio=square', timeout=10)
     job = r.json()['job']
     while c < 10:
         c += 1
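The hunk ends just inside the retry loop; the rest of flip_text (lines 62-72) is outside this diff, so how the finished image is fetched is not visible here. A rough sketch of how such a polling loop commonly continues, assuming url_sd2 answers with the job status as JSON and url_sd3 serves the finished PNG by job id (the endpoint shapes, field names, and the poll_job helper are all assumptions, not taken from the file):

import time
import requests

def poll_job(job, url_sd2, url_sd3, attempts=10, delay=3):
    # Illustrative only: ask the status endpoint about the job a bounded
    # number of times, then download the image once it reports success.
    for _ in range(attempts):
        status = requests.get(f'{url_sd2}{job}', timeout=10).json()
        if status.get('status') == 'succeeded':           # assumed field and value
            png = requests.get(f'{url_sd3}{job}.png', timeout=10).content
            with open('result.png', 'wb') as f:
                f.write(png)
            return 'result.png'                           # the UI's image_output uses type='filepath'
        time.sleep(delay)
    return None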
@@ -73,96 +73,4 @@ def flip_text(prompt, negative_prompt, task, steps, sampler, cfg_scale, seed):
 
 def mirror(image_output, scale_by, method, gfpgan, codeformer):
 
-    url_up =
+    url_up = "https://scale-diffusion-open.
-    url_up_f = os.getenv("url_up_f")
-
-    scale_by = int(scale_by)
-    gfpgan = int(gfpgan)
-    codeformer = int(codeformer)
-
-    with open(image_output, "rb") as image_file:
-        encoded_string2 = base64.b64encode(image_file.read())
-    encoded_string2 = str(encoded_string2).replace("b'", '')
-
-    encoded_string2 = "data:image/png;base64," + encoded_string2
-    data = {"fn_index":81,"data":[0,0,encoded_string2,None,"","",True,gfpgan,codeformer,0,scale_by,512,512,None,method,"None",1,False,[],"",""],"session_hash":""}
-    print(data)
-    r = requests.post(f"{url_up}", json=data, timeout=100)
-    print(r.text)
-    ph = f"{url_up_f}" + str(r.json()['data'][0][0]['name'])
-    return ph
-
-css = """
-#generate {
-    width: 100%;
-    background: #e253dd !important;
-    border: none;
-    border-radius: 50px;
-    outline: none !important;
-    color: white;
-}
-#generate:hover {
-    background: #de6bda !important;
-    outline: none !important;
-    color: #fff;
-}
-#image_output {
-    display: flex;
-    justify-content: center;
-}
-footer {visibility: hidden !important;}
-
-#image_output {
-    height: 100% !important;
-}
-"""
-
-with gr.Blocks(css=css) as demo:
-
-    with gr.Tab("Базовые настройки"):
-        with gr.Row():
-            prompt = gr.Textbox(placeholder="Введите описание изображения...", show_label=True, label='Описание изображения:', lines=3)
-        with gr.Row():
-            task = gr.Radio(interactive=True, value="Deliberate 3", show_label=True, label="Модель нейросети:",
-                            choices=["AbsoluteReality 1.8.1", "Elldreth's Vivid Mix", "Anything V5", "Openjourney V4", "Analog Diffusion",
-                                     "Lyriel 1.6", "Realistic Vision 5.0", "Dreamshaper 8", "epiCRealism v5",
-                                     "CyberRealistic 3.3", "ToonYou 6", "Deliberate 3"])
-    with gr.Tab("Расширенные настройки"):
-        with gr.Row():
-            negative_prompt = gr.Textbox(placeholder="Negative Prompt", show_label=True, label='Negative Prompt:', lines=3, value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry")
-        with gr.Row():
-            sampler = gr.Dropdown(value="DPM++ SDE Karras", show_label=True, label="Sampling Method:", choices=[
-                "Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM"])
-        with gr.Row():
-            steps = gr.Slider(show_label=True, label="Sampling Steps:", minimum=1, maximum=30, value=25, step=1)
-        with gr.Row():
-            cfg_scale = gr.Slider(show_label=True, label="CFG Scale:", minimum=1, maximum=20, value=7, step=1)
-        with gr.Row():
-            seed = gr.Number(show_label=True, label="Seed:", minimum=-1, maximum=1000000, value=-1, step=1)
-
-    with gr.Tab("Настройки апскейлинга"):
-        with gr.Column():
-            with gr.Row():
-                scale_by = gr.Number(show_label=True, label="Во сколько раз увеличить:", minimum=1, maximum=4, value=2, step=1)
-            with gr.Row():
-                method = gr.Dropdown(show_label=True, value="ESRGAN_4x", label="Алгоритм увеличения", choices=["ScuNET GAN", "SwinIR 4x", "ESRGAN_4x", "R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"])
-        with gr.Column():
-            with gr.Row():
-                gfpgan = gr.Slider(show_label=True, label="Эффект GFPGAN (для улучшения лица)", minimum=0, maximum=1, value=0, step=0.1)
-            with gr.Row():
-                codeformer = gr.Slider(show_label=True, label="Эффект CodeFormer (для улучшения лица)", minimum=0, maximum=1, value=0, step=0.1)
-
-
-    with gr.Column():
-        text_button = gr.Button("Сгенерировать изображение", variant='primary', elem_id="generate")
-    with gr.Column():
-        image_output = gr.Image(show_label=True, show_download_button=True, interactive=False, label='Результат:', elem_id='image_output', type='filepath')
-    text_button.click(flip_text, inputs=[prompt, negative_prompt, task, steps, sampler, cfg_scale, seed], outputs=image_output)
-
-    img2img_b = gr.Button("Увеличить изображение", variant='secondary')
-    image_i2i = gr.Image(show_label=True, label='Увеличенное изображение:')
-    img2img_b.click(mirror, inputs=[image_output, scale_by, method, gfpgan, codeformer], outputs=image_i2i)
-
-
-demo.queue(concurrency_count=24)
-demo.launch()
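Net effect of the commit: per the +73,4 hunk header, the new app.py now ends at line 76, apparently cut off inside the url_up string as rendered here, and everything after it is gone, including the rest of mirror(), the css string, the gr.Blocks interface, demo.queue(), and demo.launch(). With no interface left to build or launch, the Space exits right after import, which would explain the Runtime error status shown at the top. A quick local check, assuming the truncated file has been saved as app.py (a filename chosen here just for the check):

import py_compile

# Raises py_compile.PyCompileError if app.py no longer parses,
# for example because the final string literal is left unterminated.
py_compile.compile("app.py", doraise=True)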