from fastapi import APIRouter
from datetime import datetime
from datasets import load_dataset
from sklearn.metrics import accuracy_score
import numpy as np
import os
import torch
import gc
from transformers import Wav2Vec2ForSequenceClassification, AutoFeatureExtractor, pipeline
from .utils.evaluation import AudioEvaluationRequest
from .utils.emissions import tracker, clean_emissions_data, get_space_info
from dotenv import load_dotenv
import logging
import torch.nn.utils.prune as prune

# Configure logging
logging.basicConfig(level=logging.INFO)
logging.info("Python file starting")

load_dotenv()

router = APIRouter()

DESCRIPTION = "Random Baseline"
ROUTE = "/audio"

device = 0 if torch.cuda.is_available() else -1


def preprocess_function(example, feature_extractor):
    """Extract padded/truncated input features from a batch of raw audio arrays."""
    return feature_extractor(
        [x["array"] for x in example["audio"]],
        sampling_rate=feature_extractor.sampling_rate,
        padding="longest",
        max_length=16000,
        truncation=True,
        return_tensors="pt",
    )


def apply_pruning(model, amount=0.3):
    """Apply L1 unstructured pruning to the model's linear-layer weights."""
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear):
            prune.l1_unstructured(module, name="weight", amount=amount)
            prune.remove(module, "weight")  # make the pruning permanent
    return model


@router.post(ROUTE, tags=["Audio Task"], description=DESCRIPTION)
async def evaluate_audio(request: AudioEvaluationRequest):
    """
    Evaluate audio classification for rainforest sound detection.
    """
    # Get space info
    username, space_url = get_space_info()

    logging.info("Loading data")
    dataset = load_dataset(request.dataset_name, streaming=True, token=os.getenv("HF_TOKEN"))
    logging.info("Data loaded")

    test_dataset = dataset["test"]
    del dataset

    # Start tracking emissions
    tracker.start()
    tracker.start_task("inference")

    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
    test_dataset = test_dataset.map(
        preprocess_function,
        fn_kwargs={"feature_extractor": feature_extractor},
        remove_columns="audio",
        batched=True,
        batch_size=32,
    )
    gc.collect()

    model_name = "CindyDelage/Challenge_HuggingFace_DFG_FrugalAI"
    model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name)

    # Prune 30% of the linear weights, then apply dynamic quantization.
    # Pruning must come first: quantize_dynamic replaces nn.Linear modules with
    # quantized equivalents that apply_pruning would no longer match.
    model.eval()
    model = apply_pruning(model, amount=0.3)
    if device == -1:
        # Dynamic quantization is only supported on CPU; skip it on GPU,
        # where moving the quantized model to CUDA would fail.
        model = torch.quantization.quantize_dynamic(model, dtype=torch.qint8)

    classifier = pipeline("audio-classification", model=model, feature_extractor=feature_extractor, device=device)

    predictions = []
    true_labels = []
    logging.info("Starting batch predictions")
    for data in iter(test_dataset):
        with torch.no_grad():
            result = classifier(np.asarray(data["input_values"]), batch_size=1)
        predicted_label = result[0]["label"]
        label = 1 if predicted_label == "environment" else 0
        predictions.append(label)
        # Collect the ground-truth label in the same pass: the dataset is
        # streamed, so a second iteration would re-stream it from the Hub.
        true_labels.append(data["label"])

        # Free memory after each iteration
        del result
        del label
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()

    logging.info("Predictions finished")
    del classifier
    del feature_extractor
    gc.collect()

    # Stop tracking emissions
    emissions_data = tracker.stop_task()

    accuracy = accuracy_score(true_labels, predictions)

    results = {
        "username": username,
        "space_url": space_url,
        "submission_timestamp": datetime.now().isoformat(),
        "model_description": DESCRIPTION,
        "accuracy": float(accuracy),
        "energy_consumed_wh": emissions_data.energy_consumed * 1000,
        "emissions_gco2eq": emissions_data.emissions * 1000,
        "emissions_data": clean_emissions_data(emissions_data),
        "api_route": ROUTE,
        "dataset_config": {
            "dataset_name": request.dataset_name,
            "test_size": request.test_size,
            "test_seed": request.test_seed,
        },
    }

    logging.info("Returning results")
    return results
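

# Minimal usage sketch (an assumption, not part of the original module): mount this
# router on a FastAPI app and exercise the /audio route with FastAPI's TestClient.
# The import path and the AudioEvaluationRequest field values below are
# hypothetical placeholders; adjust them to the actual project layout and dataset.
#
#   from fastapi import FastAPI
#   from fastapi.testclient import TestClient
#   from tasks.audio import router  # hypothetical import path for this module
#
#   app = FastAPI()
#   app.include_router(router)
#   client = TestClient(app)
#   response = client.post("/audio", json={
#       "dataset_name": "rfcx/frugalai",  # hypothetical dataset id
#       "test_size": 0.2,
#       "test_seed": 42,
#   })
#   print(response.json())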