from flask import Flask, render_template, request, jsonify import numpy as np from sklearn.linear_model import LogisticRegression import cv2 import os from werkzeug.utils import secure_filename app = Flask(__name__) # Configure upload folder UPLOAD_FOLDER = 'uploads' os.makedirs(UPLOAD_FOLDER, exist_ok=True) app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER ALLOWED_EXTENSIONS = {'mp4', 'avi', 'mov'} # Dummy ML model for LBW decision def train_dummy_model(): X = np.array([ [0.5, 0.0, 0.4, 0.5, 30, 0], # Not Out [0.5, 0.5, 0.5, 0.5, 35, 2], # Out [0.6, 0.2, 0.5, 0.6, 32, 1], # Not Out [0.5, 0.4, 0.5, 0.4, 34, 0], # Out ]) y = np.array([0, 1, 0, 1]) model = LogisticRegression() model.fit(X, y) return model model = train_dummy_model() # Check allowed file extensions def allowed_file(filename): return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS # Process video to extract ball trajectory def process_video(video_path): cap = cv2.VideoCapture(video_path) if not cap.isOpened(): return None, None, "Failed to open video" # Lists to store trajectory points actual_path = [] frame_count = 0 total_speed = 0 spin = 0 # Simplified: Assume no spin for now while cap.isOpened(): ret, frame = cap.read() if not ret: break # Convert to HSV and detect ball (assuming a red ball) hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) mask = cv2.inRange(hsv, (0, 120, 70), (10, 255, 255)) contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) if contours: c = max(contours, key=cv2.contourArea) x, y, w, h = cv2.boundingRect(c) center_x = x + w / 2 center_y = y + h / 2 # Normalize coordinates to 0-1 (assuming 1280x720 video resolution) norm_x = center_x / 1280 norm_y = center_y / 720 actual_path.append({"x": norm_x, "y": norm_y}) frame_count += 1 if frame_count > 30: # Process first 30 frames for simplicity break cap.release() if not actual_path: return None, None, "No ball detected in video" # Assume last point is impact, calculate pitching as midpoint pitching_x = actual_path[len(actual_path)//2]["x"] pitching_y = actual_path[len(actual_path)//2]["y"] impact_x = actual_path[-1]["x"] impact_y = actual_path[-1]["y"] # Simulate speed (frames per second to m/s, rough estimate) fps = cap.get(cv2.CAP_PROP_FPS) or 30 speed = (len(actual_path) / (frame_count / fps)) * 0.5 # Simplified conversion # Projected path (linear from impact to stumps, adjusted for spin) projected_path = [ {"x": impact_x, "y": impact_y}, {"x": impact_x + spin * 0.1, "y": 1.0} # Stumps at y=1.0 ] return actual_path, projected_path, pitching_x, pitching_y, impact_x, impact_y, speed, spin @app.route('/') def index(): return render_template('index.html') @app.route('/analyze', methods=['POST']) def analyze(): if 'video' not in request.files: return jsonify({'error': 'No video uploaded'}), 400 file = request.files['video'] if file.filename == '' or not allowed_file(file.filename): return jsonify({'error': 'Invalid file'}), 400 # Save the uploaded video filename = secure_filename(file.filename) video_path = os.path.join(app.config['UPLOAD_FOLDER'], filename) file.save(video_path) # Process video actual_path, projected_path, pitching_x, pitching_y, impact_x, impact_y, speed, spin = process_video(video_path) if actual_path is None: return jsonify({'error': projected_path}), 400 # projected_path holds error message here # Predict LBW decision features = np.array([[pitching_x, pitching_y, impact_x, impact_y, speed, spin]]) prediction = model.predict(features)[0] confidence = model.predict_proba(features)[0][prediction] decision = "Out" if prediction == 1 else "Not Out" # Clean up os.remove(video_path) return jsonify({ 'actual_path': actual_path, 'projected_path': projected_path, 'decision': decision, 'confidence': round(confidence, 2), 'pitching': {'x': pitching_x, 'y': pitching_y}, 'impact': {'x': impact_x, 'y': impact_y} }) if __name__ == '__main__': app.run(host='0.0.0.0', port=7860, debug=True)