NeuralFalcon committed on
Commit 205273a · verified · 1 Parent(s): e54f550

Upload 4 files

Files changed (4)
  1. app.py +232 -0
  2. config.py +22 -0
  3. create_mask.py +84 -0
  4. inpaint.py +121 -0
app.py ADDED
@@ -0,0 +1,232 @@
+ import gradio as gr
+ import numpy as np
+ import imageio
+ import cv2
+ import os
+ import re
+ import uuid
+ import zipfile
+ from inpaint import InpaintingTester
+
+
+ def create_mask(watermark, mask_type="white"):
+     """
+     Create a mask for the watermark region.
+     mask_type: 'white' for a white mask, 'black' for a black mask.
+     """
+     h, w, _ = watermark.shape
+     if mask_type == "white":
+         return np.ones((h, w), dtype=np.uint8) * 255  # White mask
+     elif mask_type == "black":
+         return np.zeros((h, w), dtype=np.uint8)  # Black mask
+     return None
+
+
+ def inpaint_watermark(watermark, mask):
+     """Inpaint the watermark region using the mask."""
+     return cv2.inpaint(watermark, mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)
+
+
+ def place_inpainted_back(image, inpainted_region, location):
+     """Place the inpainted region back into the original image."""
+     x_start, y_start, x_end, y_end = location
+     image[y_start:y_end, x_start:x_end] = inpainted_region
+     return image
+
+
+ def extract_watermark(image, height_ratio=0.15, width_ratio=0.15, margin=0):
+     """Extract the bottom-right watermark region using the given ratios and margin."""
+     h, w, _ = image.shape
+     crop_h, crop_w = int(h * height_ratio), int(w * width_ratio)
+     x_start, y_start = w - crop_w, h - crop_h
+     watermark = image[y_start:h - margin, x_start:w - margin]
+     location = (x_start, y_start, w - margin, h - margin)
+     return watermark, location
+
+
+ def load_inpainting_model():
+     """Load the inpainting model."""
+     save_path = "./output"
+     # resize_to = None  # Default size from config
+     resize_to = (480, 480)
+     return InpaintingTester(save_path, resize_to)
+
+
+ def process_image_with_model(image_path, mask_path, tester):
+     """Process the image using the inpainting model and return the cleaned image path."""
+     image_mask_pairs = [(image_path, mask_path)]
+     return tester.process_multiple_images(image_mask_pairs)[0]
+
+
+ def img_file_name(image_path):
+     """Build a unique, filesystem-safe output path from the input file name."""
+     global image_folder
+     text = os.path.basename(image_path)
+     text = text.split(".")[0]
+     # Retain only letters and spaces, then normalise the name
+     text = re.sub(r'[^a-zA-Z\s]', '', text)
+     text = text.lower().strip()      # Convert to lowercase and strip leading/trailing spaces
+     text = text.replace(" ", "_")    # Replace spaces with underscores
+
+     # Truncate long names or fall back to a placeholder for empty ones
+     truncated_text = text[:25] if text else "empty"
+
+     # Generate a random string for uniqueness
+     random_string = uuid.uuid4().hex[:8].upper()
+
+     # Construct the file name
+     file_name = f"{image_folder}/{truncated_text}_{random_string}.png"
+     return file_name
+
+
+ def logo_remover(image_path):
+     """Remove the Meta watermark from the bottom-right corner of an image."""
+     image = cv2.imread(image_path)
+     image = cv2.resize(image, (1280, 1280))  # Resize image if needed
+
+     # Extract the bottom-right quadrant, then the watermark region inside it
+     first_crop, first_location = extract_watermark(image, 0.50, 0.50, 0)
+     watermark, location = extract_watermark(first_crop, 0.12, 0.26, 27)  # height_ratio, width_ratio, margin
+
+     # Create black and white masks: white marks the region to inpaint
+     mask1 = create_mask(first_crop, "black")
+     mask2 = create_mask(watermark, "white")
+     combined_mask = place_inpainted_back(mask1, mask2, location)
+
+     # Save temporary files
+     input_image = "./input/temp.png"
+     input_mask = "./input/temp_mask.png"
+     # temp_image = cv2.resize(first_crop, (512, 512))
+     temp_image = first_crop
+     cv2.imwrite(input_image, temp_image)
+     # temp_mask = cv2.resize(combined_mask, (512, 512))
+     temp_mask = combined_mask
+     cv2.imwrite(input_mask, temp_mask)
+
+     clean_image_path = process_image_with_model(input_image, input_mask, tester)
+
+     # Check that the inpainting step produced an output
+     if clean_image_path is None:
+         print(f"Failed to inpaint image: {image_path}")
+         return None  # Or handle the error accordingly
+     clean_image = cv2.imread(clean_image_path)
+     clean_image = cv2.resize(clean_image, (combined_mask.shape[1], combined_mask.shape[0]))
+     result_image = place_inpainted_back(image, clean_image, first_location)
+     save_path = img_file_name(image_path)
+     cv2.imwrite(save_path, result_image)
+     return save_path
+
+
+ # Handle the manually edited image and return the final result
+ def process_and_return(im):
+     global tester
+     # Paths for the composite image (base image) and the mask
+     base_image_path = "base_image.png"
+     mask_image_path = "mask_image.png"
+
+     # Save the composite image (base image)
+     imageio.imwrite(base_image_path, im["composite"])
+
+     # Extract the alpha channel (mask) of the first drawing layer
+     alpha_channel = im["layers"][0][:, :, 3]
+
+     # Create the mask: white (255) where drawn, black (0) elsewhere
+     mask = np.zeros_like(alpha_channel, dtype=np.uint8)
+     mask[alpha_channel > 0] = 255  # Set drawn areas to white (255)
+
+     # Save the mask image
+     imageio.imwrite(mask_image_path, mask)
+
+     # Process the images using the inpainting model
+     final_result = process_image_with_model(base_image_path, mask_image_path, tester)
+
+     # Return the processed image
+     return final_result
+
+
+ def ui_3():
+     # Gradio app for manual watermark removal
+     with gr.Blocks() as demo:
+         with gr.Row():
+             # ImageEditor component for uploading the image and painting over the watermark
+             im = gr.ImageEditor(
+                 type="numpy",
+                 canvas_size=(1, 1),   # Use canvas_size instead of crop_size
+                 layers=True,          # Allow layers in the editor
+                 transforms=["crop"],  # Allow cropping
+                 format="png",         # Save images in PNG format
+                 label="Base Image",
+                 show_label=True,
+             )
+             # Image component to display the processed result
+             im2 = gr.Image(label="Processed Image", show_label=True)
+
+         # Button to trigger the image processing
+         btn = gr.Button("Process Image")
+
+         # Run the inpainting when the button is clicked
+         btn.click(process_and_return, inputs=im, outputs=im2)
+     return demo
+
+
+ # def handle_pil_image(image):
+ #     logo_remover(image)
+
+
+ def ui_1():
+     test_examples = [["./input/image.jpg"]]
+     gradio_input = [gr.Image(label='Upload an Image', type="filepath")]
+     gradio_output = [gr.Image(label='Display Image')]
+     gradio_interface = gr.Interface(
+         fn=logo_remover,
+         inputs=gradio_input,
+         outputs=gradio_output,
+         title="Meta Watermark Remover For Image",
+         examples=test_examples,
+     )
+     return gradio_interface
+
+
+ def make_zip(image_list):
+     """Bundle the processed images into a single zip archive."""
+     zip_path = f"./temp/images/{uuid.uuid4().hex[:6]}.zip"
+     with zipfile.ZipFile(zip_path, 'w') as zipf:
+         for image in image_list:
+             zipf.write(image, os.path.basename(image))
+     return zip_path
+
+
+ def handle_multiple_files(image_files):
+     """Process one or more uploaded images; return a single image or a zip of results."""
+     image_list = []
+     if len(image_files) == 1:
+         saved_path = logo_remover(image_files[0])
+         return saved_path
+     else:
+         for image_path in image_files:
+             saved_path = logo_remover(image_path)
+             image_list.append(saved_path)
+         zip_path = make_zip(image_list)
+         return zip_path
+
+
+ def ui_2():
+     gradio_multiple_images = gr.Interface(
+         handle_multiple_files,
+         [gr.File(type='filepath', file_count='multiple', label='Upload Images')],
+         [gr.File(label='Download File')],
+         title='Meta Watermark Remover For Bulk Images',
+         cache_examples=True,
+     )
+     return gradio_multiple_images
+
+
+ # Load the inpainting model once at startup
+ tester = load_inpainting_model()
+ image_folder = "./temp/images"
+ if not os.path.exists(image_folder):
+     os.makedirs(image_folder)
+
+ # Launch the Gradio app
+ if __name__ == "__main__":
+     demo1 = ui_3()
+     demo2 = ui_1()
+     demo3 = ui_2()
+     demo = gr.TabbedInterface(
+         [demo1, demo2, demo3],
+         tab_names=["Manual Remove", "Meta Single Image", "Meta Bulk Images"],
+         title="Meta Watermark Remover",
+     )
+     demo.launch(show_error=True)
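
Note: `logo_remover` writes its temporary crops to `./input/`, which the script never creates, and `config.py` expects the pretrained weights at `./model/deepfillv2_WGAN.pth`. A minimal setup sketch (an assumption for illustration, not part of this commit):

    # Hypothetical one-time setup: ./output and ./temp/images are created by the
    # code itself, but ./input and ./model are not. Place deepfillv2_WGAN.pth
    # under ./model/ manually (see config.py).
    import os

    for folder in ("./input", "./model"):
        os.makedirs(folder, exist_ok=True)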
config.py ADDED
@@ -0,0 +1,22 @@
+ import torch
+
+ # GENERIC
+ GPU_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
+ INIMAGE = "./input/image.jpg"
+ MASKIMAGE = "./input/mask.jpg"
+ OUTIMAGE = "./output/inpainted_img.png"
+ RESIZE_TO = (512, 512)
+ CUDA = torch.cuda.is_available()
+
+ # DEEPFILLv2
+ DEEPFILL_MODEL_PATH = "./model/deepfillv2_WGAN.pth"
+ GPU_ID = -1
+ INIT_TYPE = "xavier"
+ INIT_GAIN = 0.02
+ PAD_TYPE = "zero"
+ IN_CHANNELS = 4       # RGB image (3) + mask (1)
+ OUT_CHANNELS = 3      # RGB output
+ LATENT_CHANNELS = 48
+ ACTIVATION = "elu"
+ NORM = "in"           # instance normalization
+ NUM_WORKERS = 0
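
These constants are pulled into `inpaint.py` via `from config import *`; `RESIZE_TO` is only the fallback size when `InpaintingTester` is constructed with `resize_to=None` (app.py passes `(480, 480)` instead). A minimal sketch of relying on the defaults (for illustration only; the paths are the ones defined above):

    from config import INIMAGE, MASKIMAGE
    from inpaint import InpaintingTester

    # Falls back to RESIZE_TO = (512, 512) because resize_to is None
    tester = InpaintingTester(save_path="./output", resize_to=None)
    print(tester.process_multiple_images([(INIMAGE, MASKIMAGE)]))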
create_mask.py ADDED
@@ -0,0 +1,84 @@
+ import numpy as np
+ import cv2
+ from config import RESIZE_TO, MASKIMAGE
+
+ # Random mask generators for the inpainting model:
+ #   - free-form (brush-stroke) mask
+ #   - bbox (rectangular) mask
+ # Both write the result to MASKIMAGE.
+
+
+ def create_ff_mask():
+     config = {
+         "img_shape": list(RESIZE_TO),
+         "mv": 15,     # max number of strokes
+         "ma": 4.0,    # max angle
+         "ml": 40,     # max stroke length
+         "mbw": 5,     # max brush width
+     }
+
+     h, w = config["img_shape"]
+     mask = np.zeros((h, w))
+     num_v = np.random.randint(config["mv"])
+
+     for i in range(num_v):
+         start_x = np.random.randint(w)
+         start_y = np.random.randint(h)
+         for j in range(1 + np.random.randint(5)):
+             angle = 0.01 + np.random.randint(config["ma"])
+             if i % 2 == 0:
+                 angle = 2 * 3.1415926 - angle
+             length = 10 + np.random.randint(config["ml"])
+             brush_w = 5 + np.random.randint(config["mbw"])
+             end_x = (start_x + length * np.sin(angle)).astype(np.int32)
+             end_y = (start_y + length * np.cos(angle)).astype(np.int32)
+
+             cv2.line(mask, (start_y, start_x), (end_y, end_x), 255.0, brush_w)
+             start_x, start_y = end_x, end_y
+
+     mask = mask.astype(np.uint8)
+     cv2.imwrite(MASKIMAGE, mask)
+
+
+ def create_bbox_mask():
+     shape = list(RESIZE_TO)
+     margin = [10, 10]
+     bbox_shape = [30, 30]
+
+     def random_bbox(shape, margin, bbox_shape):
+         """Generate a random tlhw (top, left, height, width) bounding box.
+         Args:
+             shape: (img_height, img_width) of the mask.
+             margin: (vertical_margin, horizontal_margin) kept free at the borders.
+             bbox_shape: (height, width) of the box.
+         Returns:
+             tuple: (top, left, height, width)
+         """
+         img_height, img_width = shape
+         height, width = bbox_shape
+         ver_margin, hor_margin = margin
+         maxt = img_height - ver_margin - height
+         maxl = img_width - hor_margin - width
+         t = np.random.randint(low=ver_margin, high=maxt)
+         l = np.random.randint(low=hor_margin, high=maxl)
+         h = height
+         w = width
+         return (t, l, h, w)
+
+     bboxs = []
+     for i in range(20):
+         bbox = random_bbox(shape, margin, bbox_shape)
+         bboxs.append(bbox)
+
+     height, width = shape
+     mask = np.zeros((height, width), np.float32)
+     for bbox in bboxs:
+         # Shrink each box by a random amount before filling it in
+         h = int(bbox[2] * 0.1) + np.random.randint(int(bbox[2] * 0.2) + 1)
+         w = int(bbox[3] * 0.1) + np.random.randint(int(bbox[3] * 0.2) + 1)
+         mask[
+             (bbox[0] + h):(bbox[0] + bbox[2] - h),
+             (bbox[1] + w):(bbox[1] + bbox[3] - w),
+         ] = 255.0
+
+     mask = mask.astype(np.uint8)
+     cv2.imwrite(MASKIMAGE, mask)
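
Neither generator is imported by `app.py`; they exist to produce a random test mask at `MASKIMAGE` for exercising the inpainting model on its own. A minimal usage sketch (an assumption, not part of this commit):

    # Hypothetical standalone use: write a random mask to MASKIMAGE
    # (./input/mask.jpg per config.py) before running inpaint.py on a test image.
    from create_mask import create_ff_mask, create_bbox_mask

    create_ff_mask()      # random free-form brush strokes
    # create_bbox_mask()  # or: random rectangles (overwrites the same file)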
inpaint.py ADDED
@@ -0,0 +1,121 @@
+ import os
+ import torch
+ from torch.utils.data import DataLoader
+ from types import SimpleNamespace
+ from deepfillv2 import test_dataset, utils
+ from config import *
+
+
+ class InpaintingTester:
+     def __init__(self, save_path, resize_to=None):
+         if resize_to is None:
+             resize_to = RESIZE_TO
+         self.save_path = save_path
+         self.setsize = resize_to
+
+         # Build the generator network options
+         opt = SimpleNamespace(
+             pad_type=PAD_TYPE,
+             in_channels=IN_CHANNELS,
+             out_channels=OUT_CHANNELS,
+             latent_channels=LATENT_CHANNELS,
+             activation=ACTIVATION,
+             norm=NORM,
+             init_type=INIT_TYPE,
+             init_gain=INIT_GAIN,
+             use_cuda=CUDA,
+             gpu_device=GPU_DEVICE,
+         )
+
+         # Initialize the generator (only once)
+         self.generator = utils.create_generator(opt).eval()
+
+         # Load pretrained model weights
+         self.load_model_generator(self.generator)
+
+         # Move the generator to the configured device
+         self.generator = self.generator.to(GPU_DEVICE)
+
+     def load_model_generator(self, generator):
+         pretrained_dict = torch.load(
+             DEEPFILL_MODEL_PATH, map_location=torch.device(GPU_DEVICE), weights_only=True
+         )
+         generator.load_state_dict(pretrained_dict)
+
+     def process_image(self, in_image, mask_image, save_image_path):
+         # Initialize dataset and dataloader for a single image/mask pair
+         trainset = test_dataset.InpaintDataset(in_image, mask_image, self.setsize)
+         dataloader = DataLoader(
+             trainset,
+             batch_size=1,
+             shuffle=False,
+             num_workers=8,
+             pin_memory=True,
+         )
+
+         # Testing loop for a single image
+         for batch_idx, (img, mask) in enumerate(dataloader):
+             img = img.to(GPU_DEVICE)
+             mask = mask.to(GPU_DEVICE)
+
+             # Generator output
+             with torch.no_grad():
+                 first_out, second_out = self.generator(img, mask)
+
+             # Combine outputs with the input: keep known pixels, fill only the masked region
+             first_out_wholeimg = img * (1 - mask) + first_out * mask
+             second_out_wholeimg = img * (1 - mask) + second_out * mask
+
+             masked_img = img * (1 - mask) + mask
+             mask = torch.cat((mask, mask, mask), 1)
+             img_list = [second_out_wholeimg]
+             name_list = ["second_out"]
+
+             # Save the sample image
+             results_path = os.path.dirname(save_image_path)
+             if not os.path.exists(results_path):
+                 os.makedirs(results_path)
+
+             utils.save_sample_png(
+                 sample_folder=results_path,
+                 sample_name=os.path.basename(save_image_path),
+                 img_list=img_list,
+                 name_list=name_list,
+                 pixel_max_cnt=255,
+             )
+
+     def process_multiple_images(self, image_mask_pairs):
+         """Inpaint a list of (image, mask) pairs; return the saved PNG paths (None on failure)."""
+         png_images = []
+         for img_path, mask_path in image_mask_pairs:
+             try:
+                 save_image_path = os.path.join(self.save_path, os.path.basename(img_path))
+                 print(f"Processing: {img_path} and {mask_path}")
+                 self.process_image(img_path, mask_path, save_image_path)
+                 extension = os.path.splitext(save_image_path)[1]
+                 save_at = save_image_path.replace(extension, ".png")
+                 png_images.append(save_at)
+             except Exception as e:
+                 # Record a placeholder for the failed image
+                 png_images.append(None)
+                 print(f"Error: {e}")
+         return png_images
+
+
+ # Main execution
+ # if __name__ == "__main__":
+ #     save_path = "./output"
+ #     resize_to = None  # Default size from config
+
+ #     # List of image and mask pairs
+ #     image_mask_pairs = [
+ #         ("./input/image.jpg", "./input/mask.jpg"),
+ #     ]
+
+ #     tester = InpaintingTester(save_path, resize_to)
+
+ #     # Process multiple images using a loop
+ #     results = tester.process_multiple_images(image_mask_pairs)
+ #     print(results)