File size: 2,156 Bytes
48e4064
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
# handler.py
from typing import Dict, Any
from transformers import AutoProcessor, MusicgenForConditionalGeneration
import torch
import numpy as np

class EndpointHandler:
    """Inference-endpoint handler that turns a text prompt into MusicGen audio.

    Loads a MusicGen checkpoint once at startup and serves generation requests
    via ``__call__``, returning the raw waveform as nested Python lists so the
    response is JSON-serializable.
    """

    # MusicGen's EnCodec decoder emits audio at 32 kHz, and each generated
    # token corresponds to 1024 audio samples — used to convert a requested
    # duration (seconds) into a max_new_tokens budget.
    SAMPLING_RATE = 32000
    SAMPLES_PER_TOKEN = 1024

    def __init__(self, path: str = ""):
        """Load the processor and model from *path*.

        Uses float16 on GPU to halve memory; falls back to float32 on CPU
        (previously the handler hard-crashed without CUDA).
        """
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        dtype = torch.float16 if self.device == "cuda" else torch.float32
        self.processor = AutoProcessor.from_pretrained(path)
        self.model = MusicgenForConditionalGeneration.from_pretrained(
            path,
            torch_dtype=dtype,
        ).to(self.device)

    def __call__(self, data: Dict[str, Any]) -> "list[Dict[str, Any]]":
        """Generate audio for one request.

        Args:
            data: Request payload containing:
                - inputs (dict): 'prompt' (str) and optional 'duration'
                  in seconds (default 30).
                - parameters (dict, optional): overrides passed straight to
                  ``model.generate`` (take precedence over the defaults).

        Returns:
            A single-element list: ``[{"generated_audio": <nested lists>}]``.
        """
        # Read with .get (not .pop): popping mutated the caller's dict, and
        # when "inputs" was absent `inputs` aliased `data`, so the second pop
        # silently removed "parameters" from `inputs` as well.
        parameters = data.get("parameters", {})
        inputs = data.get("inputs", data)

        prompt = inputs.get("prompt", "")
        duration = inputs.get("duration", 30)  # seconds

        # Token budget: duration * 32000 samples/s, 1024 samples per token.
        max_new_tokens = int(
            (duration * self.SAMPLING_RATE) / self.SAMPLES_PER_TOKEN
        )

        # Tokenize the prompt and move tensors to the model's device.
        model_inputs = self.processor(
            text=[prompt],
            padding=True,
            return_tensors="pt",
        ).to(self.device)

        generation_params = {
            "do_sample": True,
            "guidance_scale": 3,
            "max_new_tokens": max_new_tokens,
        }
        # Any user-provided generation parameters override the defaults.
        generation_params.update(parameters)

        # torch.autocast replaces the deprecated torch.cuda.amp.autocast and
        # also works as a context on CPU.
        with torch.autocast(self.device):
            outputs = self.model.generate(**model_inputs, **generation_params)

        # Move to host memory and convert to nested lists for JSON.
        generated_audio = outputs.cpu().numpy().tolist()

        return [{"generated_audio": generated_audio}]