File size: 2,067 Bytes
29572cc
 
 
 
 
 
 
 
 
 
 
 
 
7b662f1
29572cc
 
 
 
 
 
 
5b0f9d5
29572cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b995b9b
29572cc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import numpy as np
import cv2
from landmark_utils import detect_frames_track

def detect_track(video):
    """Read every frame of *video* and run landmark detection/tracking on them.

    Parameters
    ----------
    video : str
        Path to a video file readable by OpenCV.

    Returns
    -------
    numpy.ndarray
        Raw landmark data produced by ``detect_frames_track`` (empty when
        no face was found in the clip).
    """
    capture = cv2.VideoCapture(video)
    all_frames = []
    # Drain the capture until read() fails (end of stream or decode error).
    while True:
        ok, frame = capture.read()
        if not ok:
            break
        all_frames.append(frame)
    landmarks = detect_frames_track(all_frames)
    capture.release()
    return np.array(landmarks)

def extract_landmark(video):
    """Extract facial landmarks from *video* and save them next to the video.

    Parameters
    ----------
    video : str
        Path to the input video; landmarks are written to ``video + ".txt"``.

    Returns
    -------
    str | None
        Path of the saved landmark file, or ``None`` when no face was
        detected.

    Bug fixed: previously ``path`` was only assigned in the else-branch, so
    the no-face case raised ``UnboundLocalError`` at ``return path`` instead
    of signalling "nothing saved".
    """
    path = None
    raw_data = detect_track(video)
    if len(raw_data) == 0:
        print("No face detected", video)
    else:
        path = video + ".txt"
        np.savetxt(path, raw_data, fmt='%1.5f')
    return path

def get_data_for_test(path, fake, block):  # fake:manipulated=1 original=0
    """Slice the landmark sequence stored at *path* into fixed-length samples.

    Parameters
    ----------
    path : str
        Text file of landmark vectors, one frame per row (``np.loadtxt``).
    fake : int
        Video-level label: 1 = manipulated, 0 = original.
    block : int
        Number of consecutive frames per sample.

    Returns
    -------
    tuple
        ``(x, x_diff, y, video_y, sample_to_video, count_y)`` where ``x`` are
        the frame blocks, ``x_diff`` the frame-to-frame deltas within each
        block (``block - 1`` rows), ``y`` the per-sample labels, ``video_y``
        the single video label, ``sample_to_video`` maps each sample back to
        its source file, and ``count_y`` counts samples per file.
    """
    x = []
    x_diff = []
    y = []
    video_y = [fake]
    count_y = {}
    sample_to_video = []

    vectors = np.loadtxt(path)
    print("vectors = ", vectors)

    # Non-overlapping windows of `block` frames; the trailing remainder
    # (fewer than `block` frames) is dropped.
    for start in range(0, vectors.shape[0] - block, block):
        window = vectors[start:start + block, :]
        x.append(window)
        # Per-frame delta: next frame minus current frame, block-1 rows.
        x_diff.append(vectors[start + 1:start + block, :] - window[:block - 1, :])
        y.append(fake)

        # Tally how many samples this file contributed.
        count_y[path] = count_y.get(path, 0) + 1
        sample_to_video.append(path)

    return np.array(x), np.array(x_diff), np.array(y), np.array(video_y), np.array(sample_to_video), count_y

def merge_video_prediction(mix_prediction, s2v, vc):
    """Aggregate per-sample predictions into one score per video.

    Parameters
    ----------
    mix_prediction : iterable of float
        Per-sample scores; a sample votes "fake" when its score >= 0.5.
    s2v : iterable
        Video key for each sample (parallel to ``mix_prediction``).
    vc : dict
        Total sample count per video key.

    Returns
    -------
    list of float
        For each video (first-seen order), the fraction of its samples that
        voted "fake".
    """
    positive_votes = {}
    for score, video in zip(mix_prediction, s2v):
        vote = 1 if score >= 0.5 else 0
        positive_votes[video] = positive_votes.get(video, 0) + vote
    # Dicts preserve insertion order, so output order matches first occurrence.
    return [positive_votes[video] / vc[video] for video in positive_votes]