# aidancer/dance_generator.py
import numpy as np
import cv2


class DanceGenerator:
    def __init__(self):
        self.prev_moves = []
        self.style_memory = []
        # Optional dancer avatar; cv2.imread returns None if the file is missing
        self.avatar = cv2.imread('assets/dancer_avatar.png')

    def generate_dance_sequence(self, all_poses, mode, total_frames, frame_size):
        """Generate complete dance sequence for the entire video"""
        if mode == "Sync Partner":
            sequence = self._generate_sync_sequence(all_poses, total_frames, frame_size)
        else:
            sequence = self._generate_creative_sequence(all_poses, total_frames, frame_size)
        return sequence

    def _generate_sync_sequence(self, all_poses, total_frames, frame_size):
        """Generate synchronized dance sequence"""
        height, width = frame_size
        sequence = []
        # Convert all poses to arrays (None for frames with no detected pose)
        pose_arrays = []
        for pose in all_poses:
            if pose is not None:
                pose_arrays.append(self._landmarks_to_array(pose))
            else:
                pose_arrays.append(None)
        # Generate mirrored sequence with smooth transitions
        for i in range(total_frames):
            frame = np.zeros((height, width, 3), dtype=np.uint8)
            # Guard against total_frames exceeding the number of extracted poses
            if i < len(pose_arrays) and pose_arrays[i] is not None:
                # Mirror the pose
                mirrored = self._mirror_movements(pose_arrays[i])
                # Add smooth transition from the previous frame's mirrored pose
                if i > 0 and pose_arrays[i - 1] is not None:
                    prev_mirrored = self._mirror_movements(pose_arrays[i - 1])
                    mirrored = self._smooth_transition(prev_mirrored, mirrored, 0.3)
                # Create dance frame
                frame = self._create_enhanced_dance_frame(
                    mirrored,
                    frame_size,
                    add_effects=True
                )
            sequence.append(frame)
        return sequence

    def _generate_creative_sequence(self, all_poses, total_frames, frame_size):
        """Generate creative dance sequence based on style"""
        height, width = frame_size
        sequence = []
        # Analyze style from all poses
        style_patterns = self._analyze_style_patterns(all_poses)
        # Generate new sequence using style patterns
        for i in range(total_frames):
            frame = np.zeros((height, width, 3), dtype=np.uint8)
            # Generate new pose based on style
            new_pose = self._generate_style_based_pose(style_patterns, i / total_frames)
            if new_pose is not None:
                frame = self._create_enhanced_dance_frame(
                    new_pose,
                    frame_size,
                    add_effects=True
                )
            sequence.append(frame)
        return sequence

    def _analyze_style_patterns(self, poses):
        """Analyze dance style patterns from poses"""
        patterns = []
        for pose in poses:
            if pose is not None:
                landmarks = self._landmarks_to_array(pose)
                patterns.append(landmarks)
        return patterns

    def _generate_style_based_pose(self, patterns, progress):
        """Generate new pose based on style patterns and progress"""
        if not patterns:
            return None
        # Create smooth interpolation between poses
        num_patterns = len(patterns)
        pattern_idx = int(progress * (num_patterns - 1))
        if pattern_idx < num_patterns - 1:
            t = progress * (num_patterns - 1) - pattern_idx
            pose = self._interpolate_poses(
                patterns[pattern_idx],
                patterns[pattern_idx + 1],
                t
            )
        else:
            pose = patterns[-1]
        return pose

    def _interpolate_poses(self, pose1, pose2, t):
        """Smoothly interpolate between two poses"""
        return pose1 * (1 - t) + pose2 * t

    def _create_enhanced_dance_frame(self, pose_array, frame_size, add_effects=True):
        """Create enhanced visualization frame with effects"""
        height, width = frame_size
        frame = np.zeros((height, width, 3), dtype=np.uint8)
        # Convert normalized coordinates to pixel coordinates
        points = (pose_array[:, :2] * [width, height]).astype(int)
        # Draw enhanced skeleton
        connections = self._get_pose_connections()
        for connection in connections:
            start_idx, end_idx = connection
            if start_idx < len(points) and end_idx < len(points):
                if add_effects:
                    # Draw glowing lines
                    self._draw_glowing_line(
                        frame,
                        points[start_idx],
                        points[end_idx],
                        (0, 255, 0)
                    )
                else:
                    cv2.line(frame,
                             tuple(points[start_idx]),
                             tuple(points[end_idx]),
                             (0, 255, 0), 2)
        # Draw enhanced joints
        for point in points:
            if add_effects:
                self._draw_glowing_point(frame, point, (0, 0, 255))
            else:
                cv2.circle(frame, tuple(point), 4, (0, 0, 255), -1)
        return frame

    def _draw_glowing_line(self, frame, start, end, color, thickness=2):
        """Draw a line with glow effect"""
        # Draw the glow first (thicker, dimmer strokes), then the main line on top
        for i in range(3, 0, -1):
            alpha = 0.1 * i
            cv2.line(frame, tuple(start), tuple(end),
                     tuple(int(c * alpha) for c in color),
                     thickness + i * 2)
        # Draw main line
        cv2.line(frame, tuple(start), tuple(end), color, thickness)

    def _draw_glowing_point(self, frame, point, color, radius=4):
        """Draw a point with glow effect"""
        # Draw the glow first (larger, dimmer circles), then the main point on top
        for i in range(3, 0, -1):
            alpha = 0.1 * i
            cv2.circle(frame, tuple(point), radius + i * 2,
                       tuple(int(c * alpha) for c in color), -1)
        # Draw main point
        cv2.circle(frame, tuple(point), radius, color, -1)

    def _landmarks_to_array(self, landmarks):
        """Convert MediaPipe landmarks to numpy array"""
        points = []
        for landmark in landmarks.landmark:
            points.append([landmark.x, landmark.y, landmark.z])
        return np.array(points)

    def _mirror_movements(self, landmarks):
        """Mirror the input movements"""
        mirrored = landmarks.copy()
        mirrored[:, 0] = 1 - mirrored[:, 0]  # Flip x coordinates
        return mirrored

    def _update_style_memory(self, landmarks):
        """Update memory of dance style"""
        self.style_memory.append(landmarks)
        if len(self.style_memory) > 30:  # Keep last 30 frames
            self.style_memory.pop(0)

    def _generate_style_based_moves(self):
        """Generate new moves based on learned style"""
        if not self.style_memory:
            return np.zeros((33, 3))  # Default pose shape (33 MediaPipe landmarks)
        # Simple implementation: interpolate between stored poses
        base_pose = self.style_memory[-1]
        if len(self.style_memory) > 1:
            prev_pose = self.style_memory[-2]
            t = np.random.random()
            new_pose = t * base_pose + (1 - t) * prev_pose
        else:
            new_pose = base_pose
        return new_pose

    def _create_dance_frame(self, pose_array):
        """Create visualization frame from pose array"""
        frame = np.zeros((480, 640, 3), dtype=np.uint8)
        # Convert normalized coordinates to pixel coordinates
        points = (pose_array[:, :2] * [640, 480]).astype(int)
        # Draw connections between joints
        connections = self._get_pose_connections()
        for connection in connections:
            start_idx, end_idx = connection
            if start_idx < len(points) and end_idx < len(points):
                cv2.line(frame,
                         tuple(points[start_idx]),
                         tuple(points[end_idx]),
                         (0, 255, 0), 2)
        # Draw joints
        for point in points:
            cv2.circle(frame, tuple(point), 4, (0, 0, 255), -1)
        return frame

    def _get_pose_connections(self):
        """Define connections between MediaPipe pose landmarks"""
        return [
            (0, 1), (1, 2), (2, 3), (3, 7),          # Face (left side)
            (0, 4), (4, 5), (5, 6), (6, 8),          # Face (right side)
            (9, 10),                                 # Mouth
            (11, 12),                                # Shoulders
            (11, 13), (13, 15), (12, 14), (14, 16),  # Arms
            (11, 23), (12, 24),                      # Torso
            (23, 24), (23, 25), (24, 26),            # Hips and upper legs
            (25, 27), (26, 28), (27, 29), (28, 30),  # Lower legs and feet
            (29, 31), (30, 32)
        ]

    def _smooth_transition(self, prev_pose, current_pose, smoothing_factor=0.3):
        """Create smooth transition between poses"""
        if prev_pose is None or current_pose is None:
            return current_pose
        # Interpolate between previous and current pose
        smoothed_pose = (1 - smoothing_factor) * prev_pose + smoothing_factor * current_pose
        # Keep proportions stable by re-anchoring joints to the hip
        hip_idx = 23  # MediaPipe left-hip landmark, used as the anchor joint
        prev_hip = prev_pose[hip_idx]
        current_hip = current_pose[hip_idx]
        smoothed_hip = smoothed_pose[hip_idx]
        # Adjust positions relative to the hip anchor
        for i in range(len(smoothed_pose)):
            if i != hip_idx:
                # Interpolate each joint's offset from the hip, then re-attach it
                prev_relative = prev_pose[i] - prev_hip
                current_relative = current_pose[i] - current_hip
                smoothed_relative = ((1 - smoothing_factor) * prev_relative
                                     + smoothing_factor * current_relative)
                smoothed_pose[i] = smoothed_hip + smoothed_relative
        return smoothed_pose
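

# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original upload). It assumes the class
# is driven with MediaPipe-style pose results, i.e. objects exposing a
# `.landmark` list of 33 entries with normalized `.x`, `.y`, `.z` fields.
# `_FakeLandmark` and `_FakePose` are hypothetical stand-ins used only to
# exercise the API without running MediaPipe itself.
# ----------------------------------------------------------------------------
if __name__ == "__main__":
    class _FakeLandmark:
        def __init__(self, x, y, z):
            self.x, self.y, self.z = x, y, z

    class _FakePose:
        def __init__(self, coords):
            self.landmark = [_FakeLandmark(*c) for c in coords]

    # Ten frames of 33 random normalized landmarks (MediaPipe Pose has 33 points)
    rng = np.random.default_rng(0)
    poses = [_FakePose(rng.random((33, 3))) for _ in range(10)]

    generator = DanceGenerator()
    frames = generator.generate_dance_sequence(
        poses, mode="Sync Partner", total_frames=10, frame_size=(480, 640)
    )
    print(f"Generated {len(frames)} frames of shape {frames[0].shape}")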