DigiP-AI committed (verified)
Commit 5a2a081 · Parent: 58c87fd

Create app.py

Files changed (1): app.py (+184, −0)
app.py ADDED
import gradio as gr
import io
import random
import os
import time
import numpy as np
import subprocess
import torch
import json
import requests  # used by query() to call the Hugging Face Inference API
from transformers import AutoProcessor, AutoModelForCausalLM
from PIL import Image
from deep_translator import GoogleTranslator
from datetime import datetime
from model import models
from theme import theme

# Shared configuration
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
timeout = 100
max_images = 6

def flip_image(x):
    # Mirror an image horizontally
    return np.fliplr(x)

def clear():
    return None

def query(lora_id, prompt, is_negative=False, steps=28, cfg_scale=3.5, sampler="DPM++ 2M Karras", seed=-1, strength=100, width=896, height=1152):
    if not prompt:
        return None, None

    if lora_id is None or lora_id.strip() == "":
        lora_id = "black-forest-labs/FLUX.1-dev"

    key = random.randint(0, 999)

    API_URL = "https://api-inference.huggingface.co/models/" + lora_id.strip()

    API_TOKEN = os.getenv("HF_READ_TOKEN")
    headers = {"Authorization": f"Bearer {API_TOKEN}"}

    # Translate the prompt (Russian -> English) before sending it to the model
    prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
    print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')

    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'\033[1mGeneration {key}:\033[0m {prompt}')

    # If seed is -1, generate a random seed and use it
    if seed == -1:
        seed = random.randint(1, 1000000000)

    # Prepare the payload for the API call, including width and height
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed,
        "strength": strength,
        "parameters": {
            "width": width,   # Pass the width to the API
            "height": height  # Pass the height to the API
        }
    }

    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    if response.status_code != 200:
        print(f"Error: Failed to get image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        if response.status_code == 503:
            raise gr.Error(f"{response.status_code} : The model is being loaded")
        raise gr.Error(f"{response.status_code}")

    try:
        image_bytes = response.content
        image = Image.open(io.BytesIO(image_bytes))
        print(f'\033[1mGeneration {key} completed!\033[0m ({prompt})')
        return image, seed
    except Exception as e:
        print(f"Error when trying to open the image: {e}")
        return None, seed

examples = [
    "a beautiful woman with blonde hair and blue eyes",
    "a beautiful woman with brown hair and grey eyes",
    "a beautiful woman with black hair and brown eyes",
]

css = """
.title { font-size: 3em; align-items: center; text-align: center; }
.info { align-items: center; text-align: center; }
.model_info { text-align: center; }
.output { width: 112px !important; height: 112px !important; max-width: 112px !important; max-height: 112px !important; }
.gallery { min-width: 512px !important; min-height: 512px !important; max-height: 1024px !important; }
"""

with gr.Blocks(theme=theme, fill_width=True, css=css) as app:
    with gr.Tab("Image Generator"):
        with gr.Row():
            with gr.Column(scale=10, elem_id="prompt-container"):
                with gr.Group():
                    with gr.Row(equal_height=True):
                        text_prompt = gr.Textbox(label="Image Prompt ✍️", placeholder="Enter a prompt here", lines=2, show_copy_button=True, elem_id="prompt-text-input")
                with gr.Row():
                    with gr.Accordion("🎨 Lora trigger words", open=False):
                        gr.Markdown("""
                        - **Canopus-Pencil-Art-LoRA**: Pencil Art
                        - **Flux-Realism-FineDetailed**: Fine Detailed
                        - **Fashion-Hut-Modeling-LoRA**: Modeling
                        - **SD3.5-Large-Turbo-HyperRealistic-LoRA**: hyper realistic
                        - **Flux-Fine-Detail-LoRA**: Super Detail
                        - **SD3.5-Turbo-Realism-2.0-LoRA**: Turbo Realism
                        - **Canopus-LoRA-Flux-UltraRealism-2.0**: Ultra realistic
                        - **SD3.5-Large-Photorealistic-LoRA**: photorealistic
                        - **Flux.1-Dev-LoRA-HDR-Realism**: HDR
                        - **prithivMLmods/Ton618-Epic-Realism-Flux-LoRA**: Epic Realism
                        - **john-singer-sargent-style**: John Singer Sargent Style
                        - **alphonse-mucha-style**: Alphonse Mucha Style
                        - **ultra-realistic-illustration**: ultra realistic illustration
                        - **eye-catching**: eye-catching
                        - **john-constable-style**: John Constable Style
                        - **film-noir**: in the style of FLMNR
                        - **flux-lora-pro-headshot**: PROHEADSHOT
                        """)
                with gr.Row():
                    # Model choices come from the `models` list imported from model.py
                    custom_lora = gr.Dropdown(label="Select Model", choices=list(models), value=list(models)[0], allow_custom_value=True)
                with gr.Accordion("Advanced options", open=False):
                    negative_prompt = gr.Textbox(label="Negative Prompt", lines=5, placeholder="What should not be in the image", value="(((hands:-1.25))), physical-defects:2, unhealthy-deformed-joints:2, unhealthy-hands:2, out of frame, (((bad face))), (bad-image-v2-39000:1.3), (((out of frame))), deformed body features, (((poor facial details))), (poorly drawn face:1.3), jpeg artifacts, (missing arms:1.1), (missing legs:1.1), (extra arms:1.2), (extra legs:1.2), [asymmetrical features], warped expressions, distorted eyes")
                    with gr.Row(equal_height=True):
                        width = gr.Slider(label="Image Width", value=896, minimum=64, maximum=1216, step=32)
                        height = gr.Slider(label="Image Height", value=1152, minimum=64, maximum=1216, step=32)
                        strength = gr.Slider(label="Prompt Strength", value=100, minimum=0, maximum=100, step=1)
                        steps = gr.Slider(label="Sampling steps", value=50, minimum=1, maximum=100, step=1)
                        cfg = gr.Slider(label="CFG Scale", value=3.5, minimum=1, maximum=20, step=0.5)
                        seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
                        method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ 2S a Karras", "DPM2 Karras", "DPM2 a Karras", "DPM++ SDE Karras", "DPM Adaptive", "DPM++ 2M", "DPM2 Ancestral", "DPM++ S", "DPM++ SDE", "DDPM", "DPM Fast", "dpmpp_2s_ancestral", "DEIS", "DDIM", "Euler CFG PP", "Euler", "Euler a", "Euler Ancestral", "Euler+beta", "Heun", "Heun PP2", "LMS", "LMS Karras", "PLMS", "UniPC", "UniPC BH2"])
                with gr.Row(equal_height=True):
                    with gr.Accordion("🫘Seed", open=False):
                        seed_output = gr.Textbox(label="Seed Used", elem_id="seed-output")
                with gr.Row(equal_height=True):
                    image_num = gr.Slider(label="Number of images", minimum=1, maximum=max_images, value=1, step=1, interactive=True, scale=2)
                # Buttons to trigger generation and clear the prompt
                with gr.Row(equal_height=True):
                    text_button = gr.Button("Generate Image 🎨", variant='primary', elem_id="gen-button")
                    clear_prompt = gr.Button("Clear Prompt 🗑️", variant="primary", elem_id="clear_button")
                    clear_prompt.click(lambda: None, None, [text_prompt], queue=False, show_api=False)

            with gr.Column(scale=10):
                with gr.Group():
                    with gr.Row():
                        image_output = gr.Image(type="pil", label="Image Output", format="png", show_share_button=False, elem_id="gallery")

                with gr.Group():
                    with gr.Row():
                        gr.Examples(
                            examples=examples,
                            inputs=[text_prompt],
                        )

                with gr.Group():
                    with gr.Row():
                        clear_results = gr.Button(value="Clear Image 🗑️", variant="primary", elem_id="clear_button")
                        clear_results.click(lambda: None, None, [image_output], queue=False, show_api=False)

    text_button.click(query, inputs=[custom_lora, text_prompt, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=[image_output, seed_output])

app.queue(default_concurrency_limit=200, max_size=200)  # set up the request queue
if __name__ == "__main__":
    timeout = 100
    app.launch(show_api=False, share=False)
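
Note: app.py imports `models` from model.py and `theme` from theme.py, but neither file is part of this commit. A minimal sketch of what those two modules are assumed to provide, just so the Space can start, is shown below; the model IDs and theme choice are placeholders rather than the repository's actual files.

# model.py -- hypothetical stand-in: a list of model/LoRA repo IDs offered in the "Select Model" dropdown
models = [
    "black-forest-labs/FLUX.1-dev",  # default model used by query() when no LoRA is given
    "prithivMLmods/Ton618-Epic-Realism-Flux-LoRA",
]

# theme.py -- hypothetical stand-in: exports a Gradio theme object passed to gr.Blocks(theme=...)
import gradio as gr
theme = gr.themes.Soft()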