import torch
from PIL import Image
from RealESRGAN import RealESRGAN
import gradio as gr
import os
import spaces

# Check for CUDA and select the computation device
if torch.cuda.is_available():
    print(f"CUDA is available. GPU: {torch.cuda.get_device_name(0)}")
    device = torch.device("cuda")
else:
    print("CUDA is not available. Using CPU.")
    device = torch.device("cpu")

# Lazy loading for the models: weights are loaded only on first use
class LazyRealESRGAN:
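    """Wrapper that defers loading RealESRGAN weights until the first prediction."""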
    def __init__(self, device, scale):
        self.device = device
        self.scale = scale
        self.model = None

    def load_model(self):
        if self.model is None:
            self.model = RealESRGAN(self.device, scale=self.scale)
            # download=True fetches the pretrained weights if they are not already present locally
            self.model.load_weights(f'weights/RealESRGAN_x{self.scale}.pth', download=True)

    def predict(self, img):
        self.load_model()
        return self.model.predict(img)

# One lazy model instance per supported upscaling factor
model2 = LazyRealESRGAN(device, scale=2)
model4 = LazyRealESRGAN(device, scale=4)
model8 = LazyRealESRGAN(device, scale=8)

# Main inference function (wrapped with @spaces.GPU so Hugging Face Spaces allocates a GPU for each call)
@spaces.GPU
def inference(image, size):
    if image is None:
        raise gr.Error("Image not uploaded")

    try:
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        
        if size == '2x':
            result = model2.predict(image.convert('RGB'))
        elif size == '4x':
            result = model4.predict(image.convert('RGB'))
        else:
            width, height = image.size
            if width >= 5000 or height >= 5000:
                raise gr.Error("The image is too large.")
            result = model8.predict(image.convert('RGB'))

        print(f"Image size ({device}): {size} ... OK")
        return result
    except torch.cuda.OutOfMemoryError:
        raise gr.Error("GPU out of memory. Try a smaller image or a lower upscaling factor.")
    except gr.Error:
        # Re-raise Gradio errors (such as the size check above) without wrapping them again
        raise
    except Exception as e:
        raise gr.Error(f"An error occurred: {str(e)}")

# Gradio interface text (title, description, footer)
title = "Face Real ESRGAN UpScale: 2x 4x 8x"
description = "This is an unofficial demo for Real-ESRGAN. Scales the resolution of a photo. This model shows better results on faces compared to the original version.<br>Telegram BOT: https://t.me/restoration_photo_bot"
article = "<div style='text-align: center;'>Twitter <a href='https://twitter.com/DoEvent' target='_blank'>Max Skobeev</a> | <a href='https://huggingface.co/sberbank-ai/Real-ESRGAN' target='_blank'>Model card</a><div>"

# Build the Gradio interface
iface = gr.Interface(
    inference,
    [
        gr.Image(type="pil"),
        gr.Radio(["2x", "4x", "8x"], type="value", value="2x", label="Resolution model")
    ],
    gr.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[["groot.jpeg", "2x"]],
    flagging_mode="never",
    cache_examples=True
)

# Run the application
if __name__ == "__main__":
    iface.launch(debug=True, show_error=True)