import requests
from PIL import Image
from io import BytesIO
from numpy import asarray
import gradio as gr
import numpy as np
from math import ceil
from huggingface_hub import from_pretrained_keras

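# Fetch NASA's Astronomy Picture of the Day (APOD) and download the referenced image as RGB.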
r = requests.get(
    'https://api.nasa.gov/planetary/apod?api_key=0eyGPKWmJmE5Z0Ijx25oG56ydbTKWE2H75xuEefx')
result = r.json()
receive = requests.get(result['url'])
img = Image.open(BytesIO(receive.content)).convert('RGB')

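# Load the pretrained Keras autoencoder from the Hugging Face Hub
# (it is used below to upsample 128x128 tiles to 256x256).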
model = from_pretrained_keras("GIanlucaRub/autoencoder_model_d_0")


def double_res(input_image):
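    """Double the resolution of ``input_image``.

    The image is zero-padded so its height and width are multiples of 128,
    split into 128x128 tiles, and each tile is upsampled to 256x256 by the
    autoencoder. For inner tiles, overlapping slices centred on the tile
    borders and corners are re-predicted and pasted back to hide the seams.
    """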
    input_height = input_image.shape[0]
    input_width = input_image.shape[1]
    height = ceil(input_height/128)
    width = ceil(input_width/128)
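    # Zero-pad the input so that both dimensions are multiples of 128.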
    expanded_input_image = np.zeros((128*height, 128*width, 3), dtype=np.uint8)
    np.copyto(expanded_input_image[0:input_height, 0:input_width], input_image)

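    # Output buffer at twice the padded resolution.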
    output_image = np.zeros((128*height*2, 128*width*2, 3), dtype=np.float32)

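    # Upsample each 128x128 tile to 256x256 and paste it into the output buffer.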
    for i in range(height):
        for j in range(width):
            temp_slice = expanded_input_image[i*128:(i+1)*128, j*128:(j+1)*128] / 255
            upsampled_slice = model.predict(temp_slice[np.newaxis, ...])
            np.copyto(output_image[i*256:(i+1)*256, j*256:(j+1)*256], upsampled_slice[0])
            if i != 0 and j != 0 and i != height-1 and j != width-1:
                # re-predict overlapping tiles to remove the seams along the inner tile borders
                right_slice = expanded_input_image[i*128:(i+1)*128, (j+1)*128-64:(j+1)*128+64] / 255
                right_upsampled_slice = model.predict(right_slice[np.newaxis, ...])
                resized_right_slice = right_upsampled_slice[0][64:192, 64:192]
                np.copyto(output_image[i*256+64:(i+1)*256-64, (j+1)*256-64:(j+1)*256+64],
                          resized_right_slice)

                left_slice = expanded_input_image[i*128:(i+1)*128, j*128-64:j*128+64] / 255
                left_upsampled_slice = model.predict(left_slice[np.newaxis, ...])
                resized_left_slice = left_upsampled_slice[0][64:192, 64:192]
                np.copyto(output_image[i*256+64:(i+1)*256-64, j*256-64:j*256+64],
                          resized_left_slice)

                upper_slice = expanded_input_image[(i+1)*128-64:(i+1)*128+64, j*128:(j+1)*128] / 255
                upper_upsampled_slice = model.predict(upper_slice[np.newaxis, ...])
                resized_upper_slice = upper_upsampled_slice[0][64:192, 64:192]
                np.copyto(output_image[(i+1)*256-64:(i+1)*256+64, j*256+64:(j+1)*256-64],
                          resized_upper_slice)

                lower_slice = expanded_input_image[i*128-64:i*128+64, j*128:(j+1)*128] / 255
                lower_upsampled_slice = model.predict(lower_slice[np.newaxis, ...])
                resized_lower_slice = lower_upsampled_slice[0][64:192, 64:192]
                np.copyto(output_image[i*256-64:i*256+64, j*256+64:(j+1)*256-64],
                          resized_lower_slice)

                # re-predict tiles centred on the inner tile corners to remove the corner seams
                lower_right_slice = expanded_input_image[i*128-64:i*128+64, (j+1)*128-64:(j+1)*128+64] / 255
                lower_right_upsampled_slice = model.predict(lower_right_slice[np.newaxis, ...])
                resized_lower_right_slice = lower_right_upsampled_slice[0][64:192, 64:192]
                np.copyto(output_image[i*256-64:i*256+64, (j+1)*256-64:(j+1)*256+64],
                          resized_lower_right_slice)

                lower_left_slice = expanded_input_image[i*128-64:i*128+64, j*128-64:j*128+64] / 255
                lower_left_upsampled_slice = model.predict(lower_left_slice[np.newaxis, ...])
                resized_lower_left_slice = lower_left_upsampled_slice[0][64:192, 64:192]
                np.copyto(output_image[i*256-64:i*256+64, j*256-64:j*256+64],
                          resized_lower_left_slice)

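    # Crop away the padded region so the result is exactly twice the input size.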
    resized_output_image = output_image[0:input_height*2, 0:input_width*2]
    return resized_output_image


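# Gradio UI: show the original APOD image next to the model's upsampled version.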
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Label("Original image")
            input_img = gr.Image(img)
        with gr.Column():
            gr.Label("Image with resolution doubled")
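            # Run the super-resolution once at app start-up and display the result.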
            numpydata = asarray(img)
            output = double_res(numpydata)  # numpy.ndarray
            output_img = gr.Image(output)
demo.launch()