DigiP-AI committed
Commit e08f875 (verified)
1 Parent(s): de26bf5

Update app.back

Files changed (1)
  1. app.back +373 -0
app.back CHANGED
@@ -0,0 +1,373 @@
+ import gradio as gr
+ import random
+ import os
+ import torch
+ import subprocess
+ import numpy as np
+ from PIL import Image
+ from transformers import AutoProcessor, AutoModelForCausalLM
+ from diffusers import DiffusionPipeline
+ import cv2
+ from datetime import datetime
+ from theme import theme
+ from fastapi import FastAPI
+
+ app = FastAPI()  # note: `app` is rebound below by `with gr.Blocks(...) as app`, so this FastAPI instance is never served
+
+
+ def flip_image(x):
+     return np.fliplr(x)
+
+ def basic_filter(image, filter_type):
+     """Apply basic image filters"""
+     if filter_type == "Gray Toning":
+         return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+     elif filter_type == "Sepia":
+         sepia_filter = np.array([
+             [0.272, 0.534, 0.131],
+             [0.349, 0.686, 0.168],
+             [0.393, 0.769, 0.189]
+         ])
+         return cv2.transform(image, sepia_filter)
+     elif filter_type == "X-ray":
+         # Improved X-ray effect
+         gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+         inverted = cv2.bitwise_not(gray)
+         # Increase contrast
+         clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
+         enhanced = clahe.apply(inverted)
+         # Sharpen
+         kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
+         sharpened = cv2.filter2D(enhanced, -1, kernel)
+         return cv2.cvtColor(sharpened, cv2.COLOR_GRAY2BGR)
+     elif filter_type == "Burn it":
+         return cv2.GaussianBlur(image, (15, 15), 0)
+
+ def classic_filter(image, filter_type):
+     """Classic image filters"""
+     if filter_type == "Charcoal Effect":
+         gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+         inverted = cv2.bitwise_not(gray)
+         blurred = cv2.GaussianBlur(inverted, (21, 21), 0)
+         sketch = cv2.divide(gray, cv2.subtract(255, blurred), scale=256)
+         return cv2.cvtColor(sketch, cv2.COLOR_GRAY2BGR)
+
+     elif filter_type == "Sharpen":
+         kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
+         return cv2.filter2D(image, -1, kernel)
+
+     elif filter_type == "Embossing":
+         kernel = np.array([[0, -1, -1], [1, 0, -1], [1, 1, 0]])
+         gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+         emboss = cv2.add(cv2.filter2D(gray, -1, kernel), 128)  # cv2.add saturates at 255 instead of wrapping around
+         return cv2.cvtColor(emboss, cv2.COLOR_GRAY2BGR)
+
+     elif filter_type == "Edge Detection":
+         edges = cv2.Canny(image, 100, 200)
+         return cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
+
+ def creative_filters(image, filter_type):
+     """Creative and unusual image filters"""
+     if filter_type == "Pixel Art":
+         h, w = image.shape[:2]
+         piksel_size = 20
+         small = cv2.resize(image, (w // piksel_size, h // piksel_size))
+         return cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST)
+
+     elif filter_type == "Mosaic Effect":
+         h, w = image.shape[:2]
+         mosaic_size = 30
+         for i in range(0, h, mosaic_size):
+             for j in range(0, w, mosaic_size):
+                 roi = image[i:i+mosaic_size, j:j+mosaic_size]
+                 if roi.size > 0:
+                     color = np.mean(roi, axis=(0, 1))
+                     image[i:i+mosaic_size, j:j+mosaic_size] = color
+         return image
+
+     elif filter_type == "Rainbow":
+         hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
+         h, w = image.shape[:2]
+         for i in range(h):
+             # shift the hue of each row, keeping it inside OpenCV's 0-179 hue range
+             hsv[i, :, 0] = ((hsv[i, :, 0].astype(int) + i) % 180).astype(np.uint8)
+         return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
+
+     elif filter_type == "Night Vision":
+         green_image = image.copy()
+         green_image[:, :, 0] = 0  # Blue channel
+         green_image[:, :, 2] = 0  # Red channel
+         return cv2.addWeighted(green_image, 1.5, np.zeros(image.shape, image.dtype), 0, -50)
+
+ def special_effects(image, filter_type):
+     """Apply special effects"""
+     if filter_type == "Matrix Effect":
+         green_matrix = np.zeros_like(image)
+         green_matrix[:, :, 1] = image[:, :, 1]  # Only green channel
+         random_brightness = np.random.randint(0, 255, size=image.shape[:2])
+         green_matrix[:, :, 1] = np.minimum(green_matrix[:, :, 1] + random_brightness, 255)
+         return green_matrix
+
+     elif filter_type == "Wave Effect":
+         rows, cols = image.shape[:2]
+         img_output = np.zeros(image.shape, dtype=image.dtype)
+
+         for i in range(rows):
+             for j in range(cols):
+                 offset_x = int(25.0 * np.sin(2 * 3.14 * i / 180))
+                 offset_y = int(25.0 * np.cos(2 * 3.14 * j / 180))
+                 if i + offset_x < rows and j + offset_y < cols:
+                     img_output[i, j] = image[(i + offset_x) % rows, (j + offset_y) % cols]
+                 else:
+                     img_output[i, j] = 0
+         return img_output
+
+     elif filter_type == "Time Stamp":
+         output = image.copy()
+         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+         font = cv2.FONT_HERSHEY_SIMPLEX
+         cv2.putText(output, timestamp, (10, 30), font, 1, (255, 255, 255), 2)
+         return output
+
+     elif filter_type == "Glitch Effect":
+         glitch = image.copy()
+         h, w = image.shape[:2]
+         for _ in range(10):
+             x1 = random.randint(0, w - 50)
+             y1 = random.randint(0, h - 50)
+             x2 = random.randint(x1, min(x1 + 50, w))
+             y2 = random.randint(y1, min(y1 + 50, h))
+             glitch[y1:y2, x1:x2] = np.roll(glitch[y1:y2, x1:x2],
+                                            random.randint(-20, 20),
+                                            axis=random.randint(0, 1))
+         return glitch
+
+ def artistic_filters(image, filter_type):
+     """Applies artistic image filters"""
+     if filter_type == "Pop Art":
+         img_small = cv2.resize(image, None, fx=0.5, fy=0.5)
+         img_color = cv2.resize(img_small, (image.shape[1], image.shape[0]))
+         for _ in range(2):
+             img_color = cv2.bilateralFilter(img_color, 9, 300, 300)
+         hsv = cv2.cvtColor(img_color, cv2.COLOR_BGR2HSV)
+         hsv[:, :, 1] = np.clip(hsv[:, :, 1] * 1.5, 0, 255)  # boost saturation without overflowing uint8
+         return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
+
+     elif filter_type == "Oil Paint":
+         # detailEnhance/edgePreservingFilter expect an 8-bit 3-channel image,
+         # so work on a uint8 copy rather than float32
+         ret = cv2.bilateralFilter(image.copy(), 9, 75, 75)
+         ret = cv2.detailEnhance(ret, sigma_s=15, sigma_r=0.15)
+         ret = cv2.edgePreservingFilter(ret, flags=1, sigma_s=60, sigma_r=0.4)
+         return ret
+
+     elif filter_type == "Cartoon":
+         # Improved cartoon effect
+         color = image.copy()
+         gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
+         gray = cv2.medianBlur(gray, 5)
+         edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
+         color = cv2.bilateralFilter(color, 9, 300, 300)
+         cartoon = cv2.bitwise_and(color, color, mask=edges)
+         # Increase color saturation
+         hsv = cv2.cvtColor(cartoon, cv2.COLOR_BGR2HSV)
+         hsv[:, :, 1] = np.clip(hsv[:, :, 1] * 1.4, 0, 255)  # saturation increase
+         return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
+
+ def atmospheric_filters(image, filter_type):
+     """Atmospheric filters"""
+     if filter_type == "Autumn":
+         # Enhanced autumn effect
+         autumn_filter = np.array([
+             [0.393, 0.769, 0.189],
+             [0.349, 0.686, 0.168],
+             [0.272, 0.534, 0.131]
+         ])
+         autumn = cv2.transform(image, autumn_filter)
+         # Increase color temperature
+         hsv = cv2.cvtColor(autumn, cv2.COLOR_BGR2HSV)
+         hsv[:, :, 0] = hsv[:, :, 0] * 0.8                   # Shift to orange/yellow tones
+         hsv[:, :, 1] = np.clip(hsv[:, :, 1] * 1.2, 0, 255)  # Increase saturation
+         return cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
+
+     elif filter_type == "Nostalgia":
+         # Improved nostalgia effect:
+         # reduce contrast and add a yellowish tone
+         image = cv2.convertScaleAbs(image, alpha=0.9, beta=10)
+         sepia = cv2.transform(image, np.array([
+             [0.393, 0.769, 0.189],
+             [0.349, 0.686, 0.168],
+             [0.272, 0.534, 0.131]
+         ]))
+         # Darkening (vignette) effect in the corners
+         h, w = image.shape[:2]
+         kernel = np.zeros((h, w))
+         center = (h // 2, w // 2)
+         for i in range(h):
+             for j in range(w):
+                 dist = np.sqrt((i - center[0])**2 + (j - center[1])**2)
+                 kernel[i, j] = 1 - min(1, dist / (np.sqrt(h**2 + w**2) / 2))
+         kernel = np.dstack([kernel] * 3)
+         # multiply in float and convert back to uint8 (cv2.multiply needs matching dtypes)
+         return (sepia * kernel).astype(np.uint8)
+
+     elif filter_type == "Increase Brightness":
+         # Improved brightness boost
+         hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
+         # Increase brightness
+         hsv[:, :, 2] = cv2.convertScaleAbs(hsv[:, :, 2], alpha=1.2, beta=30)
+         # Also increase the contrast slightly
+         return cv2.convertScaleAbs(cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), alpha=1.1, beta=0)
+
+ def image_processing(image, filter_type):
+     """Main image processing function"""
+     if image is None:
+         return None
+
+     image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+
+     # Process by filter category
+     # (note: "Pixel Art", "Mosaic Effect" and "Nostalgia" are implemented above
+     # but are not wired into these lists or into the dropdown below)
+     basic_filter_list = ["Gray Toning", "Sepia", "X-ray", "Burn it"]
+     classic_filter_list = ["Charcoal Effect", "Sharpen", "Embossing", "Edge Detection"]
+     creative_filters_list = ["Rainbow", "Night Vision"]
+     special_effects_list = ["Matrix Effect", "Wave Effect", "Time Stamp", "Glitch Effect"]
+     artistic_filters_list = ["Pop Art", "Oil Paint", "Cartoon"]
+     atmospheric_filters_list = ["Autumn", "Increase Brightness"]
+
+     if filter_type in basic_filter_list:
+         output = basic_filter(image, filter_type)
+     elif filter_type in classic_filter_list:
+         output = classic_filter(image, filter_type)
+     elif filter_type in creative_filters_list:
+         output = creative_filters(image, filter_type)
+     elif filter_type in special_effects_list:
+         output = special_effects(image, filter_type)
+     elif filter_type in artistic_filters_list:
+         output = artistic_filters(image, filter_type)
+     elif filter_type in atmospheric_filters_list:
+         output = atmospheric_filters(image, filter_type)
+     else:
+         output = image
+
+     return cv2.cvtColor(output, cv2.COLOR_BGR2RGB) if len(output.shape) == 3 else output
+
+ # Background image for the UI
+ image_path = 'https://huggingface.co/spaces/DigiP-AI/Image_Studio/blob/main/abstract.jpg'  # Replace with your image file path
+
+ absolute_path = os.path.abspath(image_path)  # note: image_path is a URL, so this "absolute path" is not meaningful (and is unused)
+
+ css = """
+ .gradio-container {
+     background: url(https://huggingface.co/spaces/DigiP-AI/Image_Studio/blob/main/abstract.jpg)
+ }
+ """
+
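+ # (Hedged note: a Hub "/blob/" URL serves the file's HTML page rather than the raw image,
+ # so it may not render as a CSS background; the raw file is normally available under the
+ # "/resolve/" path, e.g. .../Image_Studio/resolve/main/abstract.jpg.)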
+ # Gradio interface
+ with gr.Blocks(theme=theme, css=css) as app:
+     gr.HTML("<center><h6>🎨 Image Studio</h6></center>")
+
+     with gr.Tab("Image to Prompt"):
+         # install flash-attn without building the CUDA extension (runs once at startup)
+         subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
+
+         # Initialize Florence model
+         device = "cuda" if torch.cuda.is_available() else "cpu"
+         florence_model = AutoModelForCausalLM.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True).to(device).eval()
+         florence_processor = AutoProcessor.from_pretrained('microsoft/Florence-2-base', trust_remote_code=True)
+
+         # api_key = os.getenv("HF_READ_TOKEN")
+
+         def generate_caption(image):
+             if not isinstance(image, Image.Image):
+                 image = Image.fromarray(image)
+
+             inputs = florence_processor(text="<MORE_DETAILED_CAPTION>", images=image, return_tensors="pt").to(device)
+             generated_ids = florence_model.generate(
+                 input_ids=inputs["input_ids"],
+                 pixel_values=inputs["pixel_values"],
+                 max_new_tokens=1024,
+                 early_stopping=False,
+                 do_sample=False,
+                 num_beams=3,
+             )
+             generated_text = florence_processor.batch_decode(generated_ids, skip_special_tokens=False)[0]
+             parsed_answer = florence_processor.post_process_generation(
+                 generated_text,
+                 task="<MORE_DETAILED_CAPTION>",
+                 image_size=(image.width, image.height)
+             )
+             prompt = parsed_answer["<MORE_DETAILED_CAPTION>"]
+             print("\n\nGeneration completed!: " + prompt)
+             return prompt
+
+         io = gr.Interface(generate_caption,
+                           inputs=[gr.Image(label="Input Image", height=320)],
+                           outputs=[gr.Textbox(label="Output Prompt", lines=2, show_copy_button=True),
+                                    # gr.Image(label="Output Image")
+                                    ]
+                           )
+
+     with gr.Tab("Text to Image"):
+         gr.HTML("<center><h6>ℹ️ Please do not run the models at the same time; they currently run on the CPU, which may affect performance.</h6></center>")
+         with gr.Accordion("Turbo-HyperRealistic", open=False):
+             model1 = gr.load("models/prithivMLmods/SD3.5-Large-Turbo-HyperRealistic-LoRA")
+         with gr.Accordion("Turbo-Realism", open=False):
+             model2 = gr.load("models/prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA")
+         with gr.Accordion("Stable-Diffusion-3.5-large", open=False):
+             model3 = gr.load("models/stabilityai/stable-diffusion-3.5-large")
+
+     with gr.Tab("Flip Image"):
+         with gr.Row():
+             image_input = gr.Image(type="numpy", label="Upload Image", height=320)
+             image_output = gr.Image(format="png")
+         with gr.Row():
+             image_button = gr.Button("Run", variant='primary')
+             image_button.click(flip_image, inputs=image_input, outputs=image_output)
+         with gr.Row():
+             clear_results = gr.Button(value="Clear Image", variant="primary", elem_id="clear_button")
+             clear_results.click(lambda: (None, None), None, [image_input, image_output])
+     with gr.Tab("Image Filters"):
+         with gr.Row():
+             with gr.Column():
+                 image_input = gr.Image(type="numpy", label="Upload Image", height=320)
+                 with gr.Accordion("ℹ️ Filter Categories", open=True):
+                     filter_type = gr.Dropdown(
+                         [
+                             # Basic Filters
+                             "Gray Toning", "Sepia", "X-ray", "Burn it",
+                             # Classic Filters
+                             "Charcoal Effect", "Sharpen", "Embossing", "Edge Detection",
+                             # Creative Filters
+                             "Rainbow", "Night Vision",
+                             # Special Effects
+                             "Matrix Effect", "Wave Effect", "Time Stamp", "Glitch Effect",
+                             # Artistic Filters
+                             "Pop Art", "Oil Paint", "Cartoon",
+                             # Atmospheric Filters
+                             "Autumn", "Increase Brightness"
+                         ],
+                         label="🎭 Select Filter",
+                         info="Choose the effect you want"
+                     )
+                 submit_button = gr.Button("✨ Apply Filter", variant="primary")
+
+             with gr.Column():
+                 image_output = gr.Image(label="🖼️ Filtered Image")
+
+         submit_button.click(
+             image_processing,
+             inputs=[image_input, filter_type],
+             outputs=image_output
+         )
+
+     with gr.Tab("Image Upscaler"):
+         with gr.Row():
+             with gr.Column():
+                 def upscale_image(input_image, radio_input):
+                     upscale_factor = radio_input
+                     output_image = cv2.resize(input_image, None, fx=upscale_factor, fy=upscale_factor, interpolation=cv2.INTER_CUBIC)
+                     return output_image
+
+                 radio_input = gr.Radio(label="Upscale Levels", choices=[2, 4, 6, 8, 10], value=2)
+
+                 iface = gr.Interface(fn=upscale_image, inputs=[gr.Image(label="Input Image", interactive=True), radio_input], outputs=gr.Image(label="Upscaled Image", format="png"), title="Image Upscaler")
+
+ app.launch(share=True)
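
Side note: `app = FastAPI()` at the top of the file is immediately shadowed by `with gr.Blocks(...) as app:`, so the FastAPI instance is never served. If mounting the Gradio UI under FastAPI was the intent, a minimal sketch (assuming the Blocks object is renamed to `demo` and uvicorn is available; not part of the committed file) would be:

    import uvicorn
    import gradio as gr
    from fastapi import FastAPI

    app = FastAPI()
    with gr.Blocks() as demo:
        ...  # build the tabs as above

    # serve the Gradio UI at the root path of the FastAPI app
    app = gr.mount_gradio_app(app, demo, path="/")
    uvicorn.run(app, host="0.0.0.0", port=7860)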