Mathdesenvnonimate committed
Commit a5ec7c9 · verified · 1 Parent(s): f906e4f

Upload app.py

Files changed (1): app.py +2 -81
app.py CHANGED
@@ -49,87 +49,8 @@ def check_input_image(input_image):
 
 
 def preprocess(input_image, do_remove_background, foreground_ratio):
-    def pre_process(img: np.array) -> np.array:
-        # H, W, C -> C, H, W
-        img = np.transpose(img[:, :, 0:3], (2, 0, 1))
-        # C, H, W -> 1, C, H, W
-        img = np.expand_dims(img, axis=0).astype(np.float32)
-        return img
-
-
-    def post_process(img: np.array) -> np.array:
-        # 1, C, H, W -> C, H, W
-        img = np.squeeze(img)
-        # C, H, W -> H, W, C
-        img = np.transpose(img, (1, 2, 0))[:, :, ::-1].astype(np.uint8)
-        return img
-
-
-    def inference(model_path: str, img_array: np.array) -> np.array:
-        options = onnxruntime.SessionOptions()
-        options.intra_op_num_threads = 1
-        options.inter_op_num_threads = 1
-        ort_session = onnxruntime.InferenceSession(model_path, options)
-        ort_inputs = {ort_session.get_inputs()[0].name: img_array}
-        ort_outs = ort_session.run(None, ort_inputs)
-
-        return ort_outs[0]
-
-
-    def convert_pil_to_cv2(input_image):
-        # pil_image = image.convert("RGB")
-        open_cv_image = np.array(input_image)
-        # RGB to BGR
-        open_cv_image = open_cv_image[:, :, ::-1].copy()
-        return open_cv_image
-
-
-    def upscale(image, model):
-        model_path = f"models/modelx25.ort"
-        img = convert_pil_to_cv2(image)
-        if img.ndim == 2:
-            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-
-        if img.shape[2] == 4:
-            alpha = img[:, :, 3]  # GRAY
-            alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR)  # BGR
-            alpha_output = post_process(inference(model_path, pre_process(alpha)))  # BGR
-            alpha_output = cv2.cvtColor(alpha_output, cv2.COLOR_BGR2GRAY)  # GRAY
-
-            img = img[:, :, 0:3]  # BGR
-            image_output = post_process(inference(model_path, pre_process(img)))  # BGR
-            image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2BGRA)  # BGRA
-            image_output[:, :, 3] = alpha_output
-
-        elif img.shape[2] == 3:
-            image_output = post_process(inference(model_path, pre_process(img)))  # BGR
-
-        return image_output
-
-
-
-    def fill_background(image):
-        image = np.array(image).astype(np.float32) / 255.0
-        image = image[:, :, :3] * image[:, :, 3:4] + (1 - image[:, :, 3:4]) * 0.5
-        image = Image.fromarray((image * 255.0).astype(np.uint8))
-        return image
-
-
-
-
-    if do_remove_background:
-        image = image_output.convert("RGB")
-        image = remove_background(image, rembg_session)
-        image = resize_foreground(image, foreground_ratio)
-        image = fill_background(image)
-    else:
-        image = image_output
-        if image.mode == "RGBA":
-            image = fill_background(image)
-    return image
-
-
-    def fill_background(image):
+
+    def fill_background(image):
         image = np.array(image).astype(np.float32) / 255.0
         image = image[:, :, :3] * image[:, :, 3:4] + (1 - image[:, :, 3:4]) * 0.5
         image = Image.fromarray((image * 255.0).astype(np.uint8))
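
For reference, the block removed by this commit was a self-contained ONNX Runtime upscaling path: pre_process reshapes an H×W×C uint8 array into a 1×C×H×W float32 batch, inference runs it through a single-threaded session, and post_process squeezes the result back to H×W×C uint8 with the channel order reversed. The sketch below restates that pattern as a standalone script; the models/modelx25.ort path comes from the removed code, while the script wrapper and the input/output file names are illustrative assumptions, not part of app.py.

# Sketch of the upscaling path removed by this commit; assumes the same
# models/modelx25.ort file the old code referenced. Not part of the new app.py.
import numpy as np
import onnxruntime
from PIL import Image


def pre_process(img: np.ndarray) -> np.ndarray:
    # H, W, C (uint8) -> 1, C, H, W (float32)
    img = np.transpose(img[:, :, 0:3], (2, 0, 1))
    return np.expand_dims(img, axis=0).astype(np.float32)


def post_process(img: np.ndarray) -> np.ndarray:
    # 1, C, H, W -> H, W, C (uint8), channel order reversed as in the old code
    img = np.squeeze(img)
    return np.transpose(img, (1, 2, 0))[:, :, ::-1].astype(np.uint8)


def inference(model_path: str, img_array: np.ndarray) -> np.ndarray:
    # Single-threaded session, mirroring the removed SessionOptions settings.
    options = onnxruntime.SessionOptions()
    options.intra_op_num_threads = 1
    options.inter_op_num_threads = 1
    session = onnxruntime.InferenceSession(model_path, options)
    outputs = session.run(None, {session.get_inputs()[0].name: img_array})
    return outputs[0]


if __name__ == "__main__":
    # Hypothetical usage: upscale an RGB image file and save the result.
    image = Image.open("input.png").convert("RGB")
    bgr = np.array(image)[:, :, ::-1]  # RGB -> BGR, as convert_pil_to_cv2 did
    out = post_process(inference("models/modelx25.ort", pre_process(bgr)))
    Image.fromarray(out[:, :, ::-1]).save("output.png")  # back to RGB for PIL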
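
What remains after the change is a much smaller preprocess: the two added lines re-declare fill_background as its only nested helper, and the three retained context lines composite an RGBA image over a 50% gray background and rebuild an 8-bit PIL image. A minimal self-contained sketch of that helper follows; the final return statement and the tiny test image are assumptions for illustration, since the hunk ends one line before the function's last line.

# Sketch of the retained fill_background helper with a toy usage example.
import numpy as np
from PIL import Image


def fill_background(image):
    # Composite an RGBA image over a mid-gray (50%) background, dropping alpha.
    image = np.array(image).astype(np.float32) / 255.0
    image = image[:, :, :3] * image[:, :, 3:4] + (1 - image[:, :, 3:4]) * 0.5
    image = Image.fromarray((image * 255.0).astype(np.uint8))
    return image  # assumed; the hunk stops just before the return


if __name__ == "__main__":
    # 2x2 RGBA test image: an opaque red column and a fully transparent column.
    rgba = Image.fromarray(
        np.array(
            [[[255, 0, 0, 255], [0, 0, 0, 0]],
             [[255, 0, 0, 255], [0, 0, 0, 0]]],
            dtype=np.uint8,
        ),
        mode="RGBA",
    )
    print(np.array(fill_background(rgba)))
    # Opaque pixels keep their color; transparent pixels come out mid-gray (~127).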