Update app.py
app.py CHANGED
@@ -801,7 +801,7 @@ def process_video_file(video_file, analysis_types):
    if not cap.isOpened():
        st.error("Error opening video file")
        os.unlink(temp_video_path)
-        return None
+        return None, None  # Return a tuple with None values instead of just None

    # Get video properties
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
@@ -818,18 +818,191 @@
    scene_change_threshold = 40.0  # Adjust as needed: lower = more sensitive
    # Process every Nth frame to reduce API calls
    process_every_n_frames = 5
-
-
-
-
-
-
-
-
-
+
+    # Check OpenCV version for compatibility with advanced features
+    opencv_version = cv2.__version__
+    use_advanced_tracking = True
+
+    # Initialize the optical flow parameters conditionally based on OpenCV version
+    try:
+        # Optical flow parameters
+        lk_params = dict(winSize=(15, 15),
+                         maxLevel=2,
+                         criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
+        # Feature detection parameters
+        feature_params = dict(maxCorners=100,
+                              qualityLevel=0.3,
+                              minDistance=7,
+                              blockSize=7)
+    except Exception as e:
+        st.warning(f"Advanced tracking features unavailable: {str(e)}")
+        use_advanced_tracking = False
    # ----------------- End Parameters -----------------

-    #
+    # Inform user if video is being truncated
+    if int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) > max_frames:
+        st.info("⚠️ Video is longer than 10 seconds. Only the first 10 seconds will be processed.")
+
+    # Slow down the output video by reducing the fps (60% of original speed)
+    output_fps = fps * 0.6
+    st.info(f"Output video will be slowed down to {output_fps:.1f} FPS (60% of original speed) for better visualization.")
+
+    # Create video writer with higher quality settings
+    try:
+        # Try XVID first (widely available)
+        fourcc = cv2.VideoWriter_fourcc(*'XVID')
+    except Exception:
+        # If that fails, try Motion JPEG
+        try:
+            fourcc = cv2.VideoWriter_fourcc(*'MJPG')
+        except Exception:
+            # Last resort - use uncompressed
+            fourcc = cv2.VideoWriter_fourcc(*'DIB ')  # Uncompressed RGB
+
+    out = cv2.VideoWriter(output_path, fourcc, output_fps, (width, height), isColor=True)
+
+    # Create a progress bar
+    progress_bar = st.progress(0)
+    status_text = st.empty()
+
+    # Enhanced statistics tracking
+    detection_stats = {
+        "objects": {},
+        "faces": 0,
+        "text_blocks": 0,
+        "labels": {},
+        # New advanced tracking
+        "object_tracking": {},   # Track object appearances by frame
+        "activity_metrics": [],  # Track frame-to-frame differences
+        "scene_changes": []      # Track major scene transitions
+    }
+
+    # For scene change detection and motion tracking
+    previous_frame_gray = None
+    prev_points = None
+
+    try:
+        frame_count = 0
+        while frame_count < max_frames:  # Limit to 10 seconds
+            ret, frame = cap.read()
+            if not ret:
+                break
+
+            frame_count += 1
+
+            # Update progress
+            progress = int(frame_count / total_frames * 100)
+            progress_bar.progress(progress)
+            status_text.text(f"Processing frame {frame_count}/{total_frames} ({progress}%) - {frame_count/fps:.1f}s of 10s")
+
+            # Add timestamp to frame
+            cv2.putText(frame, f"Time: {frame_count/fps:.2f}s",
+                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
+
+            # Activity detection and scene change detection
+            current_frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+            current_frame_gray = cv2.GaussianBlur(current_frame_gray, (21, 21), 0)
+
+            if previous_frame_gray is not None:
+                # Calculate frame difference for activity detection
+                frame_diff = cv2.absdiff(current_frame_gray, previous_frame_gray)
+                activity_level = np.mean(frame_diff)
+                detection_stats["activity_metrics"].append((frame_count/fps, activity_level))
+
+                # Scene change detection
+                if activity_level > scene_change_threshold:
+                    detection_stats["scene_changes"].append(frame_count/fps)
+                    # Mark scene change on frame
+                    cv2.putText(frame, "SCENE CHANGE",
+                                (width // 2 - 100, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), 2)
+
+                # Add optical flow tracking if enabled
+                if use_advanced_tracking and prev_points is not None:
+                    try:
+                        # Calculate optical flow
+                        next_points, status, _ = cv2.calcOpticalFlowPyrLK(previous_frame_gray,
+                                                                          current_frame_gray,
+                                                                          prev_points,
+                                                                          None,
+                                                                          **lk_params)
+
+                        # Select good points
+                        if next_points is not None:
+                            good_new = next_points[status==1]
+                            good_old = prev_points[status==1]
+
+                            # Draw motion tracks
+                            for i, (new, old) in enumerate(zip(good_new, good_old)):
+                                a, b = new.ravel()
+                                c, d = old.ravel()
+                                # Draw motion lines
+                                cv2.line(frame, (int(c), int(d)), (int(a), int(b)), (0, 255, 255), 2)
+                                cv2.circle(frame, (int(a), int(b)), 3, (0, 255, 0), -1)
+                    except Exception as e:
+                        # If optical flow fails, just continue without it
+                        pass
+
+            # Update tracking points periodically if enabled
+            if use_advanced_tracking and (frame_count % 5 == 0 or prev_points is None or (prev_points is not None and len(prev_points) < 10)):
+                try:
+                    prev_points = cv2.goodFeaturesToTrack(current_frame_gray, **feature_params)
+                except Exception:
+                    # If feature tracking fails, just continue without it
+                    prev_points = None
+
+            previous_frame_gray = current_frame_gray
+
+            # Process frames with Vision API - keep this part of the code unchanged
+            if frame_count % process_every_n_frames == 0:
+                # ... existing API processing code ...
+                pass
+
+            # Add hint about slowed down speed
+            cv2.putText(frame, "Playback: 60% speed for better visualization",
+                        (width - 400, height - 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 200, 0), 2)
+
+            # Write the frame to output video
+            out.write(frame)
+
+        # Release resources
+        cap.release()
+        out.release()
+
+        # Clear progress indicators
+        progress_bar.empty()
+        status_text.empty()
+
+        # Read the processed video as bytes for download
+        with open(output_path, 'rb') as file:
+            processed_video_bytes = file.read()
+
+        # Clean up temporary files
+        os.unlink(temp_video_path)
+        os.unlink(output_path)
+
+        # Return results
+        results = {"detection_stats": detection_stats}
+
+        # Store results in session state for chatbot context
+        st.session_state.analysis_results = results
+
+        # Update vectorstore with new results
+        update_vectorstore_with_results(results)
+
+        return processed_video_bytes, results
+
+    except Exception as e:
+        # Clean up on error
+        cap.release()
+        if 'out' in locals():
+            out.release()
+        os.unlink(temp_video_path)
+        if os.path.exists(output_path):
+            os.unlink(output_path)
+
+        # Return None values as a tuple instead of raising the exception
+        st.error(f"Error processing video: {str(e)}")
+        return None, None  # Return a tuple with None values

def load_bigquery_table(dataset_id, table_id, limit=1000):
    """Load data directly from an existing BigQuery table"""
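
The key contract change in this commit is the return type: process_video_file now always returns a two-element tuple, (processed_video_bytes, results) on success and (None, None) on any failure, instead of a bare None. Callers can therefore unpack the result unconditionally and branch on None. A minimal caller-side sketch of that contract follows; the uploader and multiselect widgets are illustrative assumptions, not part of this diff:

```python
import streamlit as st

# Hypothetical caller (assumed widget names, not from the commit): unpack the
# tuple unconditionally, then branch on None rather than wrapping the call in
# try/except, since process_video_file now swallows its own exceptions.
uploaded_video = st.file_uploader("Upload a video", type=["mp4", "avi", "mov"])
analysis_types = st.multiselect("Analysis types", ["objects", "faces", "text", "labels"])

if uploaded_video is not None:
    processed_video_bytes, results = process_video_file(uploaded_video, analysis_types)
    if processed_video_bytes is None:
        st.stop()  # process_video_file has already reported the error via st.error()
    st.video(processed_video_bytes)
```

One caveat on the codec fallback above: cv2.VideoWriter_fourcc() only packs four characters into an integer and does not raise when a codec is unavailable, so the try/except chain will in practice always settle on XVID; the reliable test is out.isOpened() after constructing the VideoWriter.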