update
- app.py +1 -1
- myturtle_cv.py +25 -17
app.py
CHANGED
@@ -340,7 +340,7 @@ def run(img_str):
     from concurrent.futures import ProcessPoolExecutor
     from concurrent.futures import as_completed
     gif_results = []
-    with ProcessPoolExecutor() as executor:
+    with ProcessPoolExecutor(max_workers=8) as executor:
         futures = [executor.submit(run_code, new_folder, i, code) for i, code in enumerate(codes)]
         for future in as_completed(futures):
             try:
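The only functional change in app.py is capping the process pool at eight workers, so a burst of submissions cannot fork an unbounded number of Python processes on the Space. A minimal, self-contained sketch of the same submit/as_completed pattern (run_all and the result re-ordering are illustrative additions; run_code, new_folder and codes are the app's own objects, defined elsewhere in app.py):

from concurrent.futures import ProcessPoolExecutor, as_completed

def run_all(run_code, new_folder, codes, max_workers=8):
    # Bounding max_workers keeps one process per worker slot instead of
    # one per submitted snippet, which matters on a small shared host.
    results = {}
    with ProcessPoolExecutor(max_workers=max_workers) as executor:
        futures = {executor.submit(run_code, new_folder, i, code): i
                   for i, code in enumerate(codes)}
        for future in as_completed(futures):
            i = futures[future]
            try:
                results[i] = future.result()
            except Exception as exc:
                # One failing snippet should not take down the whole batch.
                results[i] = exc
    # Return results in submission order.
    return [results[i] for i in sorted(results)]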
myturtle_cv.py
CHANGED
@@ -3,7 +3,7 @@ import cv2
 
 
 def crop_and_scaled_imgs(imgs):
-    PAD =
+    PAD = 5
     # use the last image to find the bounding box of the non-white area and the transformation parameters
     # and then apply the transformation to all images
 
@@ -18,34 +18,41 @@ def crop_and_scaled_imgs(imgs):
 
     # Get the bounding box of the non-zero pixels
     x, y, w, h = cv2.boundingRect(coords)
-    x
-    y
-
-
-
-
-
-
-
-
-
+    x = max(0, x-PAD)
+    y = max(0, y-PAD)
+    x_end = min(img.shape[1], x+w+2*PAD)
+    y_end = min(img.shape[0], y+h+2*PAD)
+    w = x_end - x
+    h = y_end - y
+
+    SIZE = 400
+    # Calculate the position to center the ROI in the SIZExSIZE image
+    start_x = max(0, (SIZE - w) // 2)
+    start_y = max(0, (SIZE - h) // 2)
+
+    # Create a new SIZExSIZE rgb images
+    new_imgs = [np.ones((SIZE, SIZE, 3), dtype=np.uint8) * 255 for _ in range(len(imgs))]
     for i in range(len(imgs)):
         # Extract the ROI (region of interest) of the non-white area
         roi = imgs[i][y:y+h, x:x+w]
         # If the ROI is larger than 256x256, resize it
 
-        if w >
-        scale = min(
+        if w > SIZE or h > SIZE:
+            scale = min(SIZE / w, SIZE / h)
             new_w = int(w * scale)
             new_h = int(h * scale)
             roi = cv2.resize(roi, (new_w, new_h), interpolation=cv2.INTER_AREA)
-
+        else:
+            new_w = w
+            new_h = h
+
+
 
         # new_imgs[i] = np.ones((256, 256), dtype=np.uint8) * 255
         # centered_img = np.ones((256, 256), dtype=np.uint8) * 255
 
         # Place the ROI in the centered position
-        new_imgs[i][start_y:start_y+
+        new_imgs[i][start_y:start_y+new_h, start_x:start_x+new_w] = roi
 
     return new_imgs
 
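Taken together, these changes make crop_and_scaled_imgs pad the bounding box by PAD pixels, center every frame on a fixed 400x400 white canvas, and only downscale when the padded box is larger than the canvas. A rough standalone approximation of that logic is sketched below; it is not the repo's exact function (coords and img are produced earlier in the real code, outside this hunk), and the findNonZero-based mask is an assumption about how the non-white area is detected:

import cv2
import numpy as np

def crop_and_center(imgs, pad=5, size=400):
    # Assume same-shape BGR frames on a white background, with the last
    # frame containing the finished drawing (as in the diff above).
    img = imgs[-1]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    coords = cv2.findNonZero(255 - gray)              # non-white pixels
    x, y, w, h = cv2.boundingRect(coords)

    # Grow the box by pad on each side, clamped to the image borders.
    x = max(0, x - pad)
    y = max(0, y - pad)
    x_end = min(img.shape[1], x + w + 2 * pad)
    y_end = min(img.shape[0], y + h + 2 * pad)
    w, h = x_end - x, y_end - y

    out = []
    for frame in imgs:
        roi = frame[y:y + h, x:x + w]
        # Downscale only if the padded box does not fit the canvas.
        if w > size or h > size:
            scale = min(size / w, size / h)
            roi = cv2.resize(roi, (int(w * scale), int(h * scale)),
                             interpolation=cv2.INTER_AREA)
        canvas = np.full((size, size, 3), 255, dtype=np.uint8)
        sy = (size - roi.shape[0]) // 2
        sx = (size - roi.shape[1]) // 2
        canvas[sy:sy + roi.shape[0], sx:sx + roi.shape[1]] = roi
        out.append(canvas)
    return out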
@@ -115,6 +122,7 @@ class Turtle:
         self.time_since_last_frame += abs(dist) / MOVE_SPEED
         # self.frames.append(self.canvas.copy())
         # self.save_frame_with_turtle()
+        # print(self.x, self.y)
 
     def save_frame_with_turtle(self):
         # save the current frame to frames buffer
@@ -365,7 +373,7 @@ if __name__ == "__main__":
     for i in range(7):
         with fork_state():
             for j in range(4):
-                forward(
+                forward(3*i)
                 left(90.0)
     return turtle.frames
 
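The tweaked __main__ test traces seven squares whose side length is 3*i, re-starting each one from the same pose; fork_state() is this repo's own context manager, and its name and usage suggest it snapshots and restores the turtle's state around the nested block. Purely for illustration, a hypothetical stand-in built on the standard-library turtle module (not myturtle_cv's implementation) could look like this:

import turtle
from contextlib import contextmanager

@contextmanager
def fork_state(t):
    # Assumed behaviour: remember the pen's pose, run the block, restore it.
    pos, heading, pen_down = t.position(), t.heading(), t.isdown()
    try:
        yield
    finally:
        t.penup()
        t.setposition(pos)
        t.setheading(heading)
        if pen_down:
            t.pendown()

def demo():
    t = turtle.Turtle()
    for i in range(7):
        with fork_state(t):        # every square starts from the same pose
            for j in range(4):
                t.forward(3 * i)   # side length grows with i
                t.left(90.0)
    turtle.done()

if __name__ == "__main__":
    demo()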