File size: 6,408 Bytes
caa1b69
3adf118
caa1b69
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3adf118
 
 
 
 
 
 
 
 
 
 
 
 
42e82ef
3adf118
 
 
 
 
 
 
 
caa1b69
0639f56
 
caa1b69
3adf118
caa1b69
 
3adf118
 
 
 
 
 
caa1b69
0639f56
caa1b69
3adf118
 
caa1b69
0639f56
caa1b69
 
 
 
0639f56
 
 
 
 
 
3adf118
0639f56
caa1b69
 
 
0639f56
 
 
 
 
 
 
3adf118
caa1b69
0639f56
3adf118
 
caa1b69
 
 
 
0639f56
 
 
 
3adf118
42e82ef
 
0639f56
 
 
 
 
 
 
 
 
 
 
3adf118
 
 
 
 
 
42e82ef
 
3adf118
 
 
0639f56
caa1b69
 
0639f56
 
caa1b69
3adf118
caa1b69
 
 
 
0639f56
3adf118
 
caa1b69
42e82ef
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
caa1b69
 
 
 
 
42e82ef
 
 
 
 
caa1b69
42e82ef
 
 
caa1b69
 
42e82ef
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
from interactive_pipe import interactive_pipeline, interactive, Image
from typing import Tuple
import numpy as np

# Helper functions
# ----------------


def flip_image(img, flip=True, mirror=True):
    img = img[::-1] if flip else img
    img = img[:, ::-1] if mirror else img
    return img


def get_crop(img, pos_x, pos_y, crop_size=0.1):
    c_size = int(crop_size * img.shape[0])
    crop_x = (int(pos_x * img.shape[1]), int((pos_x) * img.shape[1]) + c_size)
    crop_y = (int(pos_y * img.shape[0]), int((pos_y) * img.shape[0]) + c_size)
    return crop_x, crop_y

# Processing blocks
# -----------------


def generate_feedback_ribbon() -> Tuple[np.ndarray, np.ndarray]:
    """Generate green and red ribbons for feedback"""
    flat_array = np.ones((800, 12, 3))
    colors = [[0., 1., 0.], [1., 0., 0.]]
    ribbons = [flat_array*np.array(col)[None, None, :] for col in colors]
    return ribbons[0], ribbons[1]


DIFFICULY = {"easy": 0.18, "medium": 0.1, "hard": 0.05}
DIFFICULY_LEVELS = list(DIFFICULY.keys())


def generate_random_puzzle(
    seed: int = 43,
    difficulty: str = DIFFICULY_LEVELS[0],
    context: dict = {}
):
    """Generate random puzzle configuration and store in context.

    Configuration = 2D position and flip/mirror.
    Freeze seed for reproducibility.
    """
    np.random.seed(seed)
    pos_x, pos_y = np.random.uniform(0.2, 0.8, 2)
    context["puzzle_pos"] = (pos_x, pos_y)
    context["puzzle_flip_mirror"] = np.random.choice([True, False], 2)
    context["puzzle_piece_size"] = DIFFICULY.get(difficulty, 0.18)


def create_puzzle(
    img: np.ndarray,
    intensity: float = 0.4,
    context: dict = {}
) -> Tuple[np.ndarray, np.ndarray]:
    """Extract puzzle piece from image. Make a dark hole where the """
    out = img.copy()
    x_gt, y_gt = context["puzzle_pos"]
    flip_gt, mirror_gt = context["puzzle_flip_mirror"]
    cs_x, cs_y = get_crop(
        img, x_gt, y_gt, crop_size=context["puzzle_piece_size"])
    crop = img[cs_y[0]:cs_y[1], cs_x[0]:cs_x[1], ...]
    out[cs_y[0]:cs_y[1], cs_x[0]:cs_x[1]] = intensity*crop
    crop = flip_image(crop, flip=flip_gt, mirror=mirror_gt)
    return out, crop


def flip_mirror_piece(
    piece: np.ndarray,
    flip: bool = False,
    mirror: bool = False,
    context: dict = {}
) -> np.ndarray:
    """Flip and/or mirror the puzzle piece."""
    context["user_flip_mirror"] = (flip, mirror)
    return flip_image(piece.copy(), flip=flip, mirror=mirror)


def place_puzzle(
    puzzle: np.ndarray,
    piece: np.ndarray,
    pos_x: float = 0.5,
    pos_y: float = 0.5,
    context: dict = {}
) -> np.ndarray:
    """Place the puzzle piece at the user-defined position."""
    out = puzzle.copy()
    context["user_pos"] = (pos_x, pos_y)
    cp_x, cp_y = get_crop(
        img, pos_x, pos_y, crop_size=context["puzzle_piece_size"])
    out[cp_y[0]:cp_y[1], cp_x[0]:cp_x[1]] = piece
    return out


TOLERANCES = {"low": 0.01, "medium": 0.02, "high": 0.05}
TOLERANCE_LEVELS = list(TOLERANCES.keys())


def check_puzzle(tolerance: str = "low", context: dict = {}) -> None:
    """Check if the user placed the puzzle piece correctly.
    Store the result in the context."""
    x_gt, y_gt = context["puzzle_pos"]
    flip_gt, mirror_gt = context["puzzle_flip_mirror"]
    x, y = context["user_pos"]
    flip, mirror = context["user_flip_mirror"]
    check_pos = np.allclose([x_gt, y_gt], [x, y],
                            atol=TOLERANCES.get(tolerance, 0.01))
    check_flip_mirror = (flip_gt == flip) and (mirror_gt == mirror)
    success = check_pos and check_flip_mirror
    context["success"] = success


def display_feedback(
    puzzle: np.ndarray,
    ok_ribbon: np.ndarray,
    nok_ribbon: np.ndarray,
    context: dict = {}
) -> np.ndarray:
    """Display green/red ribbon on the right side of the puzzle."""
    success = context.get("success", False)
    ribbon = ok_ribbon if success else nok_ribbon
    out = np.hstack([puzzle, ribbon[:puzzle.shape[0], ...]])
    return out

# pipeline definition
# -------------------


def captcha_pipe(inp):
    ok_ribbon, nok_ribbon = generate_feedback_ribbon()
    generate_random_puzzle()
    puzzle, puzzle_piece = create_puzzle(inp)
    puzzle_piece = flip_mirror_piece(puzzle_piece)
    puzzle = place_puzzle(puzzle, puzzle_piece)
    check_puzzle()
    puzzle = display_feedback(puzzle, ok_ribbon, nok_ribbon)
    return puzzle

# add interactivity
# -----------------


def main(img: np.ndarray, backend="gradio", debug: bool = False):
    # If debug mode, add interactive sliders to tune the puzzle generation
    # and help the "game master" design a feasible puzzle.
    if debug:
        interactive(
            tolerance=(TOLERANCE_LEVELS[0], TOLERANCE_LEVELS, "Tolerance")
        )(check_puzzle)
        interactive(
            seed=(43, [0, 100], "Puzzle seed"),
            difficulty=(DIFFICULY_LEVELS[0], DIFFICULY_LEVELS, "Difficulty")
        )(generate_random_puzzle)
    interactive(
        pos_x=(0.5, [0.1, 0.9, 0.005], "Position X", ["left", "right"]),
        pos_y=(0.5, [0.1, 0.9, 0.005], "Position Y", ["up", "down"]),
    )(place_puzzle)
    # left, right, up, down will only supported when using the Qt backend
    interactive(
        flip=(False, "Flip Image"),
        mirror=(False, "Mirror Image"),
    )(flip_mirror_piece)
    captcha_pipe_interactive = interactive_pipeline(
        gui=backend,
        cache=True,
        markdown_description=markdown_description
    )(captcha_pipe)
    captcha_pipe_interactive(img)


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--backend", default="gradio",
                        choices=["gradio", "qt", "mpl"], type=str)
    parser.add_argument(
        "-d", "--debug", action="store_true",
        help="Debug mode (to tune difficulty and tolerance)"
    )
    args = parser.parse_args()
    markdown_description = "# Code to build this app on gradio \n\n"
    markdown_description += "In local, try using `python app.py --backend qt --debug` to get the best experience with keyboard support aswell \n\n"
    markdown_description += "Please note that matplobi`--backend mpl` is also functional although it won't look as good\n\n"
    markdown_description += "```python\n"+open(__file__, 'r').read()+"```"
    img = Image.load_image("sample.jpg")
    main(img, backend=args.backend, debug=args.debug)