from typing import Any, List, Callable
import cv2
import numpy as np
import onnxruntime
import roop.globals
from roop.typing import Face, Frame, FaceSet
from roop.utilities import resolve_relative_path
# THREAD_LOCK = threading.Lock()
class Enhance_GFPGAN():
    plugin_options:dict = None

    model_gfpgan = None
    name = None
    devicename = None

    processorname = 'gfpgan'
    type = 'enhance'


    def Initialize(self, plugin_options:dict):
        if self.plugin_options is not None:
            if self.plugin_options["devicename"] != plugin_options["devicename"]:
                self.Release()

        self.plugin_options = plugin_options
        if self.model_gfpgan is None:
            model_path = resolve_relative_path('../models/GFPGANv1.4.onnx')
            self.model_gfpgan = onnxruntime.InferenceSession(model_path, None, providers=roop.globals.execution_providers)
            # replace Mac mps with cpu for the moment
            self.devicename = self.plugin_options["devicename"].replace('mps', 'cpu')

        self.name = self.model_gfpgan.get_inputs()[0].name
    def Run(self, source_faceset: FaceSet, target_face: Face, temp_frame: Frame) -> Frame:
        # preprocess: GFPGAN expects a 512x512 RGB image, normalized to [-1, 1], in NCHW layout
        input_size = temp_frame.shape[1]
        temp_frame = cv2.resize(temp_frame, (512, 512), interpolation=cv2.INTER_CUBIC)
        temp_frame = cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB)
        temp_frame = temp_frame.astype('float32') / 255.0
        temp_frame = (temp_frame - 0.5) / 0.5
        temp_frame = np.expand_dims(temp_frame, axis=0).transpose(0, 3, 1, 2)

        # run the ONNX session; the output stays on the selected device until copied back to CPU
        io_binding = self.model_gfpgan.io_binding()
        io_binding.bind_cpu_input("input", temp_frame)
        io_binding.bind_output("1288", self.devicename)
        self.model_gfpgan.run_with_iobinding(io_binding)
        ort_outs = io_binding.copy_outputs_to_cpu()
        result = ort_outs[0][0]

        # post-process: map the [-1, 1] CHW output back to a uint8 BGR image
        result = np.clip(result, -1, 1)
        result = (result + 1) / 2
        result = result.transpose(1, 2, 0) * 255.0
        result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)
        # upscaling factor of the enhanced output relative to the original crop width
        scale_factor = int(result.shape[1] / input_size)
        return result.astype(np.uint8), scale_factor


    def Release(self):
        self.model_gfpgan = None
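

# Illustrative usage sketch (not part of the plugin interface): the enhancer is
# initialized once with the execution device, then Run() is called per aligned
# face crop. The faceset/face/crop names below are placeholders for objects
# produced by roop's face analysis step.
#
#   enhancer = Enhance_GFPGAN()
#   enhancer.Initialize({'devicename': 'cuda'})            # or 'cpu' / 'mps'
#   enhanced, scale = enhancer.Run(faceset, face, crop)    # crop: BGR uint8 face region
#   enhancer.Release()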