jhtonyKoo committed
Commit 45ea80f · verified · 1 Parent(s): 69ccb3f

Update app.py

Files changed (1)
  1. app.py +148 -308
app.py CHANGED
@@ -1,332 +1,172 @@
- import gradio as gr
  import torch
  import soundfile as sf
- import numpy as np
- import yaml
- from inference import MasteringStyleTransfer
- from utils import download_youtube_audio
- from config import args
- import pyloudnorm as pyln
- import tempfile
- import os
- import pandas as pd

- mastering_transfer = MasteringStyleTransfer(args)

- def denormalize_audio(audio, dtype=np.int16):
-     """
-     Denormalize the audio from the range [-1, 1] to the full range of the specified dtype.
-     """
-     if dtype == np.int16:
-         audio = np.clip(audio, -1, 1)  # Ensure the input is in the range [-1, 1]
-         return (audio * 32767).astype(np.int16)
-     elif dtype == np.float32:
-         return audio.astype(np.float32)
-     else:
-         raise ValueError("Unsupported dtype. Use np.int16 or np.float32.")

- def loudness_normalize(audio, sample_rate, target_loudness=-12.0):
-     # Ensure audio is float32
-     if audio.dtype != np.float32:
-         audio = audio.astype(np.float32)

-     # If audio is mono, reshape to (samples, 1)
-     if audio.ndim == 1:
-         audio = audio.reshape(-1, 1)

-     meter = pyln.Meter(sample_rate)  # create BS.1770 meter
-     loudness = meter.integrated_loudness(audio)

-     loudness_normalized_audio = pyln.normalize.loudness(audio, loudness, target_loudness)
-     return loudness_normalized_audio

- def process_youtube_url(url):
-     try:
-         audio, sr = download_youtube_audio(url)
-         return (sr, audio), None
-     except Exception as e:
-         return None, f"Error processing YouTube URL: {str(e)}"

- def download_youtube_audios(input_youtube_url, reference_youtube_url):
-     input_audio, input_error = process_youtube_url(input_youtube_url) if input_youtube_url else (None, None)
-     reference_audio, reference_error = process_youtube_url(reference_youtube_url) if reference_youtube_url else (None, None)

-     return input_audio, reference_audio, input_error, reference_error

- def process_audio_with_youtube(input_audio, input_youtube_url, reference_audio, reference_youtube_url):
-     if input_youtube_url:
-         input_audio, error = process_youtube_url(input_youtube_url)
-         if error:
-             return None, None, error

-     if reference_youtube_url:
-         reference_audio, error = process_youtube_url(reference_youtube_url)
-         if error:
-             return None, None, error

-     if input_audio is None or reference_audio is None:
-         return None, None, "Both input and reference audio are required."

-     return process_audio(input_audio, reference_audio)

- def to_numpy_audio(audio):
-     # Convert output_audio to numpy array if it's a tensor
-     if isinstance(audio, torch.Tensor):
-         audio = audio.cpu().numpy()
-     # check dimension
-     if audio.ndim == 1:
-         audio = audio.reshape(-1, 1)
-     elif audio.ndim > 2:
-         audio = audio.squeeze()
-     # Ensure the audio is in the correct shape (samples, channels)
-     if audio.shape[1] > audio.shape[0]:
-         audio = audio.transpose(1, 0)
-     return audio

- def process_audio(input_audio, reference_audio):
-     output_audio, predicted_params, sr, normalized_input = mastering_transfer.process_audio(
-         input_audio, reference_audio
-     )

-     param_output = mastering_transfer.get_param_output_string(predicted_params)

-     # Convert to numpy audio
-     output_audio = to_numpy_audio(output_audio)
-     normalized_input = to_numpy_audio(normalized_input)
-     # Normalize output audio
-     output_audio = loudness_normalize(output_audio, sr)
-     # Denormalize the audio to int16
-     output_audio = denormalize_audio(output_audio, dtype=np.int16)

-     return (sr, output_audio), param_output, (sr, normalized_input)
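
Note that gr.Audio components created with type='numpy' exchange audio as a (sample_rate, numpy_array) tuple, which is why process_audio() returns (sr, ...) pairs and converts the result to int16 PCM before handing it back to the UI.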

- def perform_ito(input_audio, reference_audio, ito_reference_audio, num_steps, optimizer, learning_rate, af_weights, loss_function, clap_target_type, clap_text_prompt, clap_distance_fn):
-     if ito_reference_audio is None:
-         ito_reference_audio = reference_audio
-     af_weights = [float(w.strip()) for w in af_weights.split(',')]

-     ito_config = {
-         'optimizer': optimizer,
-         'learning_rate': learning_rate,
-         'num_steps': num_steps,
-         'af_weights': af_weights,
-         'sample_rate': args.sample_rate,
-         'loss_function': loss_function,
-         'clap_target_type': clap_target_type,
-         'clap_text_prompt': clap_text_prompt,
-         'clap_distance_fn': clap_distance_fn
-     }

-     input_tensor = mastering_transfer.preprocess_audio(input_audio, args.sample_rate)
-     reference_tensor = mastering_transfer.preprocess_audio(reference_audio, args.sample_rate)
-     ito_reference_tensor = mastering_transfer.preprocess_audio(ito_reference_audio, args.sample_rate)

-     initial_reference_feature = mastering_transfer.get_reference_embedding(reference_tensor)

-     all_results, min_loss_step = mastering_transfer.inference_time_optimization(
-         input_tensor, ito_reference_tensor, ito_config, initial_reference_feature
-     )

-     ito_log = ""
-     loss_values = []
-     for result in all_results:
-         ito_log += result['log']
-         loss_values.append({"step": result['step'], "loss": result['loss']})

-     # Return the results of the last step
-     last_result = all_results[-1]
-     current_output = last_result['audio']
-     ito_param_output = mastering_transfer.get_param_output_string(last_result['params'])

-     # Convert to numpy audio
-     current_output = to_numpy_audio(current_output)
-     # Loudness normalize output audio
-     current_output = loudness_normalize(current_output, args.sample_rate)
-     # Denormalize the audio to int16
-     current_output = denormalize_audio(current_output, dtype=np.int16)

-     return (args.sample_rate, current_output), ito_param_output, num_steps, ito_log, pd.DataFrame(loss_values), all_results

- def update_ito_output(all_results, selected_step):
-     selected_result = all_results[selected_step - 1]
-     current_output = selected_result['audio']
-     ito_param_output = mastering_transfer.get_param_output_string(selected_result['params'])

-     # Convert to numpy audio
-     current_output = to_numpy_audio(current_output)
-     # Loudness normalize output audio
-     current_output = loudness_normalize(current_output, args.sample_rate)
-     # Denormalize the audio to int16
-     current_output = denormalize_audio(current_output, dtype=np.int16)

-     return (args.sample_rate, current_output), ito_param_output, selected_result['log']


- """ APP display """
  with gr.Blocks() as demo:
-     gr.Markdown("# ITO-Master: Inference Time Optimization for Mastering Style Transfer")
-     with gr.Row():
-         gr.Markdown("Interactive demo of Inference Time Optimization (ITO) for Music Mastering Style Transfer. \
-             The mastering style transfer is performed by a differentiable audio processing model, and the predicted parameters are shown as the output. \
-             Perform mastering style transfer with an input source audio and a reference mastering style audio. On top of this result, you can perform ITO to optimize the reference embedding $z_{ref}$ for further control over the output mastering style.")
-         gr.Image("ito_snow.png", width=500, height=300, label="ITO pipeline")

-     gr.Markdown("## Step 1: Mastering Style Transfer")

-     with gr.Tab("Upload Audio"):
-         with gr.Row():
-             input_audio = gr.Audio(label="Source Audio $x_{in}$")
-             reference_audio = gr.Audio(label="Reference Style Audio $x_{ref}$")

-         process_button = gr.Button("Process Mastering Style Transfer")
-         gr.Markdown('<span style="color: lightgray; font-style: italic;">all output samples are normalized to -12 dB LUFS</span>')

-         with gr.Row():
-             with gr.Column():
-                 output_audio = gr.Audio(label="Output Audio y'", type='numpy')
-                 normalized_input = gr.Audio(label="Normalized Source Audio", type='numpy')
-                 param_output = gr.Textbox(label="Predicted Parameters", lines=5)

-         process_button.click(
-             process_audio,
-             inputs=[input_audio, reference_audio],
-             outputs=[output_audio, param_output, normalized_input]
-         )

-     with gr.Tab("YouTube Audio"):
-         gr.Markdown("Downloading YouTube clips currently appears to be unavailable from the Hugging Face environment, but you can try it yourself in your own environment with the available source code.")
-         with gr.Row():
-             input_youtube_url = gr.Textbox(label="Input YouTube URL")
-             reference_youtube_url = gr.Textbox(label="Reference YouTube URL")

-         download_button = gr.Button("Download YouTube Audios")
-         error_message_yt = gr.Textbox(label="Error Message", visible=False)

-         with gr.Row():
-             input_audio_yt = gr.Audio(label="Source Audio (leave empty when using a YouTube URL)")
-             reference_audio_yt = gr.Audio(label="Reference Style Audio (leave empty when using a YouTube URL)")

-         process_button_yt = gr.Button("Process Mastering Style Transfer")
-         gr.Markdown('<span style="color: lightgray; font-style: italic;">all output samples are normalized to -12 dB LUFS</span>')

-         with gr.Row():
-             with gr.Column():
-                 output_audio_yt = gr.Audio(label="Output Audio y'", type='numpy')
-                 normalized_input_yt = gr.Audio(label="Normalized Source Audio", type='numpy')
-                 param_output_yt = gr.Textbox(label="Predicted Parameters", lines=5)

-         def handle_download_youtube_audios(input_youtube_url, reference_youtube_url):
-             input_audio, reference_audio, input_error, reference_error = download_youtube_audios(input_youtube_url, reference_youtube_url)
-             if input_error or reference_error:
-                 return None, None, gr.update(visible=True, value=input_error or reference_error)
-             return input_audio, reference_audio, gr.update(visible=False, value="")

-         download_button.click(
-             handle_download_youtube_audios,
-             inputs=[input_youtube_url, reference_youtube_url],
-             outputs=[input_audio_yt, reference_audio_yt, error_message_yt]
-         )

-         process_button_yt.click(
-             process_audio,
-             inputs=[input_audio_yt, reference_audio_yt],
-             outputs=[output_audio_yt, param_output_yt, normalized_input_yt]
-         )

-     # def process_and_handle_errors(input_audio, input_youtube_url, reference_audio, reference_youtube_url):
-     #     result = process_audio_with_youtube(input_audio, input_youtube_url, reference_audio, reference_youtube_url)
-     #     if len(result) == 3 and isinstance(result[2], str):  # Error occurred check
-     #         return None, None, None, gr.update(visible=True, value=result[2])
-     #     return result[0], result[1], result[2], gr.update(visible=False, value="")

-     # process_button_yt.click(
-     #     process_and_handle_errors,
-     #     inputs=[input_audio_yt, input_youtube_url, reference_audio_yt, reference_youtube_url],
-     #     outputs=[output_audio_yt, param_output_yt, normalized_input_yt, error_message_yt]
-     # )

-     gr.Markdown("## Step 2: Inference Time Optimization (ITO)")

-     with gr.Row():
-         ito_reference_audio = gr.Audio(label="ITO Reference Style Audio $x'_{ref}$ (optional)")
-         with gr.Column():
-             num_steps = gr.Slider(minimum=1, maximum=100, value=10, step=1, label="Number of Steps for additional optimization")
-             optimizer = gr.Dropdown(["Adam", "RAdam", "SGD"], value="RAdam", label="Optimizer")
-             learning_rate = gr.Slider(minimum=0.0001, maximum=0.1, value=0.001, step=0.0001, label="Learning Rate")
-             loss_function = gr.Radio(["AudioFeatureLoss", "CLAPFeatureLoss"], label="Loss Function", value="AudioFeatureLoss")

-             # Audio Feature Loss weights
-             with gr.Column(visible=True) as audio_feature_weights:
-                 af_weights = gr.Textbox(
-                     label="AudioFeatureLoss Weights (comma-separated)",
-                     value="0.1,0.001,1.0,1.0,0.1",
-                     info="RMS, Crest Factor, Stereo Width, Stereo Imbalance, Bark Spectrum"
-                 )

-             # CLAP Loss options
-             with gr.Column(visible=False) as clap_options:
-                 clap_target_type = gr.Radio(["Audio", "Text"], label="CLAP Target Type", value="Audio")
-                 clap_text_prompt = gr.Textbox(label="CLAP Text Prompt", visible=False)
-                 clap_distance_fn = gr.Dropdown(["cosine", "mse", "l1"], label="CLAP Distance Function", value="cosine")

-     def update_clap_options(loss_function):
-         if loss_function == "CLAPFeatureLoss":
-             return gr.update(visible=False), gr.update(visible=True)
-         else:
-             return gr.update(visible=True), gr.update(visible=False)

-     loss_function.change(
-         update_clap_options,
-         inputs=[loss_function],
-         outputs=[audio_feature_weights, clap_options]
      )

-     def update_clap_text_prompt(clap_target_type):
-         return gr.update(visible=clap_target_type == "Text")

-     clap_target_type.change(
-         update_clap_text_prompt,
-         inputs=[clap_target_type],
-         outputs=[clap_text_prompt]
      )

-     ito_button = gr.Button("Perform ITO")
-     gr.Markdown('<span style="color: lightgray; font-style: italic;">all output samples are normalized to -12 dB LUFS</span>')

-     with gr.Row():
          with gr.Column():
-             ito_output_audio = gr.Audio(label="ITO Output Audio")
-             ito_step_slider = gr.Slider(minimum=1, maximum=100, step=1, label="ITO Step", interactive=True)
-             ito_param_output = gr.Textbox(label="ITO Predicted Parameters", lines=15)
          with gr.Column():
-             ito_loss_plot = gr.LinePlot(
-                 x="step",
-                 y="loss",
-                 title="ITO Loss Curve",
-                 x_title="Step",
-                 y_title="Loss",
-                 height=300,
-                 width=600,
              )
-             ito_log = gr.Textbox(label="ITO Log", lines=10)

-     all_results = gr.State([])

-     ito_button.click(
-         perform_ito,
-         inputs=[normalized_input, reference_audio, ito_reference_audio, num_steps, optimizer, learning_rate, af_weights, loss_function, clap_target_type, clap_text_prompt, clap_distance_fn],
-         outputs=[ito_output_audio, ito_param_output, ito_step_slider, ito_log, ito_loss_plot, all_results]
-     ).then(
-         update_ito_output,
-         inputs=[all_results, ito_step_slider],
-         outputs=[ito_output_audio, ito_param_output, ito_log]
-     )

-     ito_step_slider.change(
-         update_ito_output,
-         inputs=[all_results, ito_step_slider],
-         outputs=[ito_output_audio, ito_param_output, ito_log]
-     )

- demo.launch()
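
The removed perform_ito() above drives the inference-time optimization loop: the network weights stay frozen, and only the reference embedding z_ref is refined against the ITO reference track under the chosen loss. A minimal sketch of that idea, assuming a differentiable mastering model model(x_in, z) and a feature-space loss loss_fn (ito_sketch, model, and loss_fn are illustrative names, not the repo's API; the actual loop is MasteringStyleTransfer.inference_time_optimization, which additionally records 'params' and 'log' per step):

import torch

def ito_sketch(model, loss_fn, x_in, x_ref_ito, z_ref, num_steps=10, lr=1e-3):
    # optimize a copy of the reference embedding; the model itself stays frozen
    z = z_ref.clone().detach().requires_grad_(True)
    opt = torch.optim.RAdam([z], lr=lr)  # the demo defaults to the RAdam optimizer
    results = []
    for step in range(num_steps):
        opt.zero_grad()
        y = model(x_in, z)            # differentiable mastering with the current embedding
        loss = loss_fn(y, x_ref_ito)  # e.g. AudioFeatureLoss or CLAPFeatureLoss
        loss.backward()
        opt.step()
        results.append({"step": step + 1, "loss": float(loss.detach()), "audio": y.detach()})
    return results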
+ import os
+ import binascii
+ import warnings
+
+ import json
+ import argparse
+ import copy
+
+ import numpy as np
+ import matplotlib.pyplot as plt
  import torch
+ import tqdm
+ import librosa
  import soundfile as sf
+ import gradio as gr
+ import pytube as pt
+
+ from pytube.exceptions import VideoUnavailable
+
+ from inference.style_transfer import *
+
+ yt_video_dir = "./yt_dir/0"
+ os.makedirs(yt_video_dir, exist_ok=True)
+
+ def get_audio_from_yt_video_input(yt_link: str, start_point_in_second=0, duration_in_second=30):
+     try:
+         yt = pt.YouTube(yt_link)
+         t = yt.streams.filter(only_audio=True)
+         filename_in = os.path.join(yt_video_dir, "input.wav")
+         t[0].download(filename=filename_in)
+     except VideoUnavailable as e:
+         warnings.warn(f"Video Not Found at {yt_link} ({e})")
+         return None, None
+
+     # trim audio length - due to computation time on the Hugging Face environment
+     trim_audio(target_file_path=filename_in, start_point_in_second=start_point_in_second, duration_in_second=duration_in_second)
+
+     return filename_in, filename_in
+
+ def get_audio_from_yt_video_ref(yt_link: str, start_point_in_second=0, duration_in_second=30):
+     try:
+         yt = pt.YouTube(yt_link)
+         t = yt.streams.filter(only_audio=True)
+         filename_ref = os.path.join(yt_video_dir, "reference.wav")
+         t[0].download(filename=filename_ref)
+     except VideoUnavailable as e:
+         warnings.warn(f"Video Not Found at {yt_link} ({e})")
+         return None, None
+
+     # trim audio length - due to computation time on the Hugging Face environment
+     trim_audio(target_file_path=filename_ref, start_point_in_second=start_point_in_second, duration_in_second=duration_in_second)
+
+     return filename_ref, filename_ref
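
The two helpers above are identical apart from the output filename (note the early return on download failure, so trim_audio() is never called with a None path). A possible consolidation (a sketch, not part of this commit; get_audio_from_yt_video is a hypothetical name):

def get_audio_from_yt_video(yt_link: str, filename: str, start_point_in_second=0, duration_in_second=30):
    # download the audio-only stream into the shared working directory
    try:
        yt = pt.YouTube(yt_link)
        stream = yt.streams.filter(only_audio=True)[0]
        file_path = os.path.join(yt_video_dir, filename)
        stream.download(filename=file_path)
    except VideoUnavailable as e:
        warnings.warn(f"Video Not Found at {yt_link} ({e})")
        return None, None
    # trim audio length - due to computation time on the Hugging Face environment
    trim_audio(target_file_path=file_path, start_point_in_second=start_point_in_second, duration_in_second=duration_in_second)
    return file_path, file_path

The existing endpoints then reduce to get_audio_from_yt_video(yt_link, "input.wav", start_point_in_second, duration_in_second) and get_audio_from_yt_video(yt_link, "reference.wav", start_point_in_second, duration_in_second).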

+ def inference(file_uploaded_in, file_uploaded_ref):
+     # clear out previously separated results
+     os.system(f"rm -r {yt_video_dir}/separated")
+     # copy the uploaded tracks to the file paths the pipeline expects
+     os.system(f"cp {file_uploaded_in} {yt_video_dir}/input.wav")
+     os.system(f"cp {file_uploaded_ref} {yt_video_dir}/reference.wav")
+
+     # perform music mixing style transfer
+     args = set_up()
+
+     inference_style_transfer = Mixing_Style_Transfer_Inference(args)
+     output_wav_path, fin_data_out_mix = inference_style_transfer.inference(file_uploaded_in, file_uploaded_ref)
+
+     return (44100, fin_data_out_mix)
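
The os.system() calls in inference() shell out for cleanup and copying, which would break on paths containing spaces (common for Gradio temporary files). A portable standard-library equivalent (a sketch, not part of this commit):

import shutil

# remove previously separated stems; ignore the error if none exist yet
shutil.rmtree(os.path.join(yt_video_dir, "separated"), ignore_errors=True)
# copy the uploaded tracks to the paths the separation stage expects
shutil.copy(file_uploaded_in, os.path.join(yt_video_dir, "input.wav"))
shutil.copy(file_uploaded_ref, os.path.join(yt_video_dir, "reference.wav"))
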
  with gr.Blocks() as demo:
+     gr.HTML(
+         """
+         <div style="text-align: center; max-width: 700px; margin: 0 auto;">
+           <div
+             style="
+               display: inline-flex;
+               align-items: center;
+               gap: 0.8rem;
+               font-size: 1.75rem;
+             "
+           >
+             <h1 style="font-weight: 900; margin-bottom: 7px;">
+               Music Mixing Style Transfer
+             </h1>
+           </div>
+         </div>
+         """
      )
+     gr.Markdown(
+         """
+         This page is a Hugging Face interactive demo of the paper ["Music Mixing Style Transfer: A Contrastive Learning Approach to Disentangle Audio Effects"](https://huggingface.co/papers/2211.02247) (ICASSP 2023).
+         - [project page](https://jhtonykoo.github.io/MixingStyleTransfer/)
+         - [GitHub](https://github.com/jhtonyKoo/music_mixing_style_transfer)
+         - [supplementary](https://pale-cicada-946.notion.site/Music-Mixing-Style-Transfer-A-Contrastive-Learning-Approach-to-Disentangle-Audio-Effects-Supplemen-e6eccd9a431a4a8fa4fdd5adb2d3f219)
+         """
      )
+     with gr.Group():
          with gr.Column():
+             with gr.Blocks():
+                 with gr.Tab("Input Music"):
+                     file_uploaded_in = gr.Audio(label="Input track (mix) to be mixing style transferred", type='filepath')
+                 with gr.Tab("YouTube url"):
+                     with gr.Row():
+                         yt_link_in = gr.Textbox(
+                             label="Enter YouTube Link of the Video", autofocus=True, lines=3
+                         )
+                         yt_in_start_sec = gr.Number(
+                             value=0,
+                             label="starting point of the song (in seconds)"
+                         )
+                         yt_in_duration_sec = gr.Number(
+                             value=30,
+                             label="duration of the song (in seconds)"
+                         )
+                     yt_btn_in = gr.Button("Download Audio from YouTube Link", size="lg")
+                     yt_audio_path_in = gr.Audio(
+                         label="Input Audio Extracted from the YouTube Video", interactive=False
+                     )
+                     yt_btn_in.click(
+                         get_audio_from_yt_video_input,
+                         inputs=[yt_link_in, yt_in_start_sec, yt_in_duration_sec],
+                         outputs=[yt_audio_path_in, file_uploaded_in],
+                     )
+             with gr.Blocks():
+                 with gr.Tab("Reference Music"):
+                     file_uploaded_ref = gr.Audio(label="Reference track (mix) to copy mixing style", type='filepath')
+                 with gr.Tab("YouTube url"):
+                     with gr.Row():
+                         yt_link_ref = gr.Textbox(
+                             label="Enter YouTube Link of the Video", autofocus=True, lines=3
+                         )
+                         yt_ref_start_sec = gr.Number(
+                             value=0,
+                             label="starting point of the song (in seconds)"
+                         )
+                         yt_ref_duration_sec = gr.Number(
+                             value=30,
+                             label="duration of the song (in seconds)"
+                         )
+                     yt_btn_ref = gr.Button("Download Audio from YouTube Link", size="lg")
+                     yt_audio_path_ref = gr.Audio(
+                         label="Reference Audio Extracted from the YouTube Video", interactive=False
+                     )
+                     yt_btn_ref.click(
+                         get_audio_from_yt_video_ref,
+                         inputs=[yt_link_ref, yt_ref_start_sec, yt_ref_duration_sec],
+                         outputs=[yt_audio_path_ref, file_uploaded_ref],
+                     )
+
+     with gr.Group():
+         gr.HTML(
+             """
+             <div> <h3> <center> Mixing Style Transfer. Performs stem-wise audio-effects style conversion by first source-separating the input mix. Inference takes longer for longer inputs, so please be patient... </center> </h3> </div>
+             """
+         )
          with gr.Column():
+             inference_btn = gr.Button("Run Mixing Style Transfer")
+             with gr.Row():
+                 output_mix = gr.Audio(label="mixing style transferred music track", type='numpy')
+             inference_btn.click(
+                 inference,
+                 inputs=[file_uploaded_in, file_uploaded_ref],
+                 outputs=[output_mix],
              )
+
+ if __name__ == "__main__":
+     demo.launch(debug=True)
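
launch(debug=True) blocks the main thread and prints errors to the console, which is convenient on the Spaces runtime. For local use, launch() accepts further standard options, for example (a sketch; both parameters are regular Gradio arguments):

if __name__ == "__main__":
    # bind to all interfaces and a fixed port so the demo is reachable from other machines
    demo.launch(debug=True, server_name="0.0.0.0", server_port=7860)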