import argparse
import torch
from multiprocessing import cpu_count

class Config:
    """
    The code focuses on adapting the configuration based on available 
    hardware resources and specified command-line arguments,
    aiming to optimize the performance and capabilities of the voice conversion process.
    """
    def __init__(self):
        """
        Calls the arg_parse() and device_config() methods to set up configuration based on command-line arguments 
        and available hardware.

        Returns: None
        """
        self.device = "cuda:0"
        self.is_half = True
        self.n_cpu = 0
        self.gpu_name = None
        self.gpu_mem = None
        (
            self.colab,
            self.api,
            self.unsupported
        ) = self.arg_parse()
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    @staticmethod
    def arg_parse() -> tuple:
        """
        Uses the argparse library to parse command-line arguments.
        Three boolean arguments are defined: --colab, --api, and --unsupported.
        
        Returns: a tuple indicating whether each argument is specified or not.
        """
        parser = argparse.ArgumentParser()
        parser.add_argument("--colab", action="store_true", help="Launch in colab")
        parser.add_argument("--api", action="store_true", help="Launch with api")
        parser.add_argument("--unsupported", action="store_true", help="Enable unsupported feature")
        cmd_opts = parser.parse_args()

        return (
            cmd_opts.colab,
            cmd_opts.api,
            cmd_opts.unsupported
        )

    # The MPS backend is only available in recent PyTorch builds and requires
    # macOS 12.3+; check via `getattr` and a try/except for compatibility.
    @staticmethod
    def has_mps() -> bool:
        """
        Determines if Multi-Process Service (MPS) is available in the current PyTorch backend.
        If MPS is available, it checks whether it can be used by trying to move a tensor to the "mps" device.
        Returns a boolean indicating MPS support.
        """
        mps_backend = getattr(torch.backends, "mps", None)
        if mps_backend is None or not mps_backend.is_available():
            return False
        try:
            torch.zeros(1).to(torch.device("mps"))
            return True
        except Exception:
            return False

    def device_config(self) -> tuple:
        """
        Checks if a CUDA-compatible GPU is available.
        
        If a compatible GPU is found:
            Determines the GPU's name and memory capacity.
            Adjusts the is_half parameter based on the GPU's characteristics.
        
        If no compatible GPU is found and MPS is available, configures the device to use MPS.
        
        If neither a compatible GPU nor MPS is available, falls back to the CPU.
        
        Determines the number of available CPU cores (n_cpu).
        
        Based on the is_half value and GPU memory capacity, configures several variables related to voice conversion,
          such as x_pad, x_query, x_center, and x_max.
        """
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            if (
                # 16-series, 10-series, and P40 cards handle fp16 poorly, so
                # force fp32 on them; V100 is excluded because device names
                # such as "V100-SXM2-16GB" also contain "16".
                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
                or "P40" in self.gpu_name.upper()
                or "1060" in self.gpu_name
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print(f"INFO: Found GPU {self.gpu_name}, forcing fp32")
                self.is_half = False
            else:
                print("INFO: Found GPU", self.gpu_name)
            # Total VRAM in GiB; the +0.4 rounds values just under a whole
            # number (e.g. a nominal 6 GB card reporting ~5.7 GiB) up to it.
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
        elif self.has_mps():
            print("INFO: No supported NVIDIA GPU found, using MPS instead")
            self.device = "mps"
            self.is_half = False
        else:
            print("INFO: No supported NVIDIA GPU found, using CPU instead")
            self.device = "cpu"
            self.is_half = False

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # Configuration for 6 GB of VRAM
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # Configuration for 5 GB of VRAM
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        # Tighter limits for cards with 4 GB of VRAM or less
        if self.gpu_mem is not None and self.gpu_mem <= 4:
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max
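

# A minimal usage sketch (illustrative only, not part of the original module):
# instantiating Config parses sys.argv via argparse, so passing any flag other
# than --colab/--api/--unsupported makes parse_args() exit with an error. The
# attributes printed below are exactly those set in __init__ above.
if __name__ == "__main__":
    config = Config()
    print(f"device={config.device}  is_half={config.is_half}  n_cpu={config.n_cpu}")
    print(f"x_pad={config.x_pad}  x_query={config.x_query}  "
          f"x_center={config.x_center}  x_max={config.x_max}")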