import torch
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from extract.getim import load_image
from torchvision import transforms


transform = transforms.Compose([
    transforms.ToTensor(),  # convert a numpy array or PIL.Image to a (C, H, W) tensor and scale by 1/255 into [0, 1.0]
])
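
# ToTensor above only scales values to [0, 1]; it does not apply ImageNet
# mean/variance normalization. For reference, a normalized variant (a sketch
# only; nothing else in this module uses it) could look like this:
transform_imagenet = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],  # ImageNet channel means
                         std=[0.229, 0.224, 0.225]),  # ImageNet channel stds
])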


# fcontent = load_image("./ori/0.jpg",transform=None,shape=[512, 256])



def show_cut(path, left, upper, right, lower):
    """
    Compare the original image with the cropped region.

    The visualization below is currently commented out, so this function is a no-op.

    :param path: the loaded image (a PIL.Image object, not a file path)
    :param left: distance in pixels from the image's left edge to the crop's top-left corner
    :param upper: distance in pixels from the image's top edge to the crop's top-left corner
    :param right: distance in pixels from the image's left edge to the crop's bottom-right corner
    :param lower: distance in pixels from the image's top edge to the crop's bottom-right corner

    Requires lower > upper and right > left.
    """

    img = path

    # print("This image's size: {}".format(img.size))  # (W, H)
    # img.save("kkk.jpg")
    # plt.figure("Image Contrast")
    #
    # plt.subplot(1, 2, 1)
    # plt.title('origin')
    #
    # plt.imshow(img)
    # plt.axis('off')
    #
    # box = (left, upper, right, lower)
    # roi = img.crop(box)
    #
    # plt.subplot(1, 2, 2)
    # plt.title('roi')
    # plt.imshow(roi)
    # plt.axis('off')
    # plt.show()


def image_cut_save(path, left, upper, right, lower):
    """
    Crop a region from an image and return it as a tensor.

    Despite the name, this version no longer writes the crop to disk
    (the roi.save call is commented out); it returns the cropped region
    converted by `transform`.

    :param path: the loaded image (a PIL.Image object, not a file path)
    :param left: distance in pixels from the image's left edge to the crop's top-left corner
    :param upper: distance in pixels from the image's top edge to the crop's top-left corner
    :param right: distance in pixels from the image's left edge to the crop's bottom-right corner
    :param lower: distance in pixels from the image's top edge to the crop's bottom-right corner

    Requires lower > upper and right > left.
    """
    img = path  # `path` is already a loaded PIL image
    box = (left, upper, right, lower)
    roi = img.crop(box)
    # roi.save(save_path)  # saving of the cropped image is disabled
    return transform(roi)
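
# Hypothetical quick check of the (left, upper, right, lower) box convention,
# e.g. cropping the left half of a 200x100 placeholder image:
# >>> from PIL import Image
# >>> demo = Image.new("RGB", (200, 100))
# >>> image_cut_save(demo, 0, 0, 100, 100).shape
# torch.Size([3, 100, 100])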



# def getcontent(fcontent,gap):
#     Intgap=gap/9
#     a=torch.Tensor()
#     for i in range(10):
#         pic_path = fcontent
#         # pic_save_dir_path = './out2/0-'+str(i)+".jpg"
#         left, upper, right, lower = Intgap*i, 0, Intgap*i+gap, gap
#         a=torch.cat([a,image_cut_save(pic_path, left, upper, right, lower).unsqueeze(1)],dim=1)
#     return a
# def cobtwoten(image_path):
#     fcontent = load_image(image_path, transform=None, shape=[512, 256])
#     Intgap = 256
#     a = torch.Tensor()
#     for i in range(2):
#         pic_path = fcontent
#         #pic_save_dir_path = './out2/0-' + str(i) + ".jpg"
#         left, upper, right, lower = Intgap * i, 0, Intgap * i + Intgap, Intgap
#         a = torch.cat([a, image_cut_save(pic_path, left, upper, right, lower).unsqueeze(1)], dim=1)
#     return a.unsqueeze(0)

def cobtwoten(image_path):
    """Load an image (resized via shape=[256, 128]) and slice it into 10 horizontally
    sliding 128x128 crops (stride 128/9 px), stacked along a new frame dimension.
    Returns a tensor of shape (1, C, 10, 128, 128)."""
    fcontent = load_image(image_path, transform=None, shape=[256, 128])
    Intgap = 128 / 9  # horizontal stride so that 10 crops of width 128 span the 256-px width
    a = torch.Tensor()
    for i in range(10):
        left, upper, right, lower = Intgap * i, 0, Intgap * i + 128, 128
        a = torch.cat([a, image_cut_save(fcontent, left, upper, right, lower).unsqueeze(1)], dim=1)
    return a.unsqueeze(0)

def cobtwoten256(image_path):
    """Same as cobtwoten, but at double resolution: 10 sliding 256x256 crops from an
    image loaded with shape=[512, 256]. Returns a tensor of shape (1, C, 10, 256, 256)."""
    fcontent = load_image(image_path, transform=None, shape=[512, 256])
    Intgap = 256 / 9  # horizontal stride so that 10 crops of width 256 span the 512-px width
    a = torch.Tensor()
    for i in range(10):
        left, upper, right, lower = Intgap * i, 0, Intgap * i + 256, 256
        a = torch.cat([a, image_cut_save(fcontent, left, upper, right, lower).unsqueeze(1)], dim=1)
    return a.unsqueeze(0)
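
# Expected clip shapes from the two builders, assuming RGB inputs and that the
# example image path exists (hypothetical quick check):
# >>> cobtwoten("./extract/image/0.jpg").shape
# torch.Size([1, 3, 10, 128, 128])
# >>> cobtwoten256("./extract/image/0.jpg").shape
# torch.Size([1, 3, 10, 256, 256])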

#
# fcontent = load_image("./extract/image/0.jpg",transform=None,shape=[256,128])
# Intgap = 128
# a = torch.Tensor()
# for i in range(2):
#     pic_path = fcontent
#     pic_save_dir_path = './out2/0-'+str(i)+".jpg"
#     left, upper, right, lower = Intgap * i, 0, Intgap * i + 128, 128
#     a = torch.cat([a, image_cut_save(pic_path, left, upper, right, lower,pic_save_dir_path).unsqueeze(1)], dim=1)
# print(a.shape)

def imgsave(image, path):
    """Save a (1, C, H, W) tensor with values in [0, 1] as an image file."""
    image = image.squeeze(0)        # (1, C, H, W) -> (C, H, W)
    image = image.permute(1, 2, 0)  # (C, H, W) -> (H, W, C)
    image_np = image.cpu().numpy() * 255
    image_np = image_np.astype(np.uint8)
    Image.fromarray(image_np).save(path)  # convert to a PIL image and save it

if __name__ == "__main__":
    # Demo: build a 10-frame clip from each listed image and write the individual
    # frames to ./out2/ (input and output paths are assumed to exist).
    lik = ["0"]
    for name in lik:
        videos = cobtwoten("./extract/image/" + name + ".jpg").permute(0, 2, 1, 3, 4)  # (1, 10, C, 128, 128)
        print(videos.shape)
        for i in range(10):
            frame = videos[:, i, :, :, :]  # i-th crop as a (1, C, 128, 128) tensor
            imgsave(frame, "./out2/" + str(i) + ".jpg")