rm-dev-null committed on
Commit 6f318d3 · verified · 1 Parent(s): 8a09c31

Update app.py

Files changed (1)
  1. app.py +87 -130
app.py CHANGED
@@ -1,61 +1,80 @@
  import gradio as gr
- import numpy as np
- import random
-
- # import spaces #[uncomment to use ZeroGPU]
- from diffusers import DiffusionPipeline
- import torch
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
-
- if torch.cuda.is_available():
-     torch_dtype = torch.float16
- else:
-     torch_dtype = torch.float32
-
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
-
- # @spaces.GPU #[uncomment to use ZeroGPU]
- def infer(
-     prompt,
-     negative_prompt,
-     seed,
-     randomize_seed,
-     width,
-     height,
-     guidance_scale,
-     num_inference_steps,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     image = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         guidance_scale=guidance_scale,
-         num_inference_steps=num_inference_steps,
-         width=width,
-         height=height,
-         generator=generator,
-     ).images[0]
-
-     return image, seed
-
-
- examples = [
-     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-     "An astronaut riding a green horse",
-     "A delicious ceviche cheesecake slice",
- ]

  css = """
  #col-container {
@@ -66,89 +85,27 @@ css = """

  with gr.Blocks(css=css) as demo:
      with gr.Column(elem_id="col-container"):
-         gr.Markdown(" # Text-to-Image Gradio Template")

          with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
                  show_label=False,
                  max_lines=1,
-                 placeholder="Enter your prompt",
                  container=False,
              )

-             run_button = gr.Button("Run", scale=0, variant="primary")
-
-         result = gr.Image(label="Result", show_label=False)
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 visible=False,
-             )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024, # Replace with defaults that work for your model
-                 )

-                 height = gr.Slider(
-                     label="Height",
-                     minimum=256,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024, # Replace with defaults that work for your model
-                 )

-             with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=10.0,
-                     step=0.1,
-                     value=0.0, # Replace with defaults that work for your model
-                 )

-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=2, # Replace with defaults that work for your model
-                 )

-         gr.Examples(examples=examples, inputs=[prompt])
-     gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=infer,
-         inputs=[
-             prompt,
-             negative_prompt,
-             seed,
-             randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-         ],
-         outputs=[result, seed],
-     )

  if __name__ == "__main__":
-     demo.launch()
 
  import gradio as gr
+ import requests
+ from pydub import AudioSegment
+ import shazamio
+ import os
+ import time
+ import base64
+
+ # SoundCloud API credentials, read from environment variables
+ client_id = os.environ.get('SOUNDCLOUD_CLIENT_ID')
+ client_secret = os.environ.get('SOUNDCLOUD_CLIENT_SECRET')
+
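+ # Exchange the client credentials for an OAuth access token at SoundCloud's token endpoint.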
+ def get_soundcloud_access_token(client_id, client_secret):
+     auth_string = f'{client_id}:{client_secret}'
+     auth_headers = {
+         'Authorization': 'Basic ' + base64.b64encode(auth_string.encode()).decode()
+     }
+     data = {
+         'grant_type': 'client_credentials'
+     }
+     response = requests.post('https://api.soundcloud.com/oauth2/token', headers=auth_headers, data=data)
+     if response.status_code == 200:
+         token_data = response.json()
+         return token_data['access_token']
+     else:
+         raise Exception(f"Failed to obtain access token: {response.text}")
+
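+ # Stream the resolved audio URL to a local file in 8 KB chunks.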
+ def download_audio(streaming_url, output_path, headers):
+     response = requests.get(streaming_url, headers=headers, stream=True)
+     with open(output_path, 'wb') as f:
+         for chunk in response.iter_content(chunk_size=8192):
+             f.write(chunk)
+
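+ # Export one audio chunk to a temporary WAV file and ask Shazam to identify it.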
+ async def identify_track(shazam, audio_chunk):
+     temp_file = 'temp_chunk.wav'
+     audio_chunk.export(temp_file, format='wav')
+     result = await shazam.recognize_file(temp_file)
+     if result and 'track' in result:
+         track_data = result['track']
+         return {
+             'title': track_data['title'],
+             'subtitle': track_data['subtitle']
+         }
+     else:
+         return None
+
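+ # Full pipeline: resolve the SoundCloud URL, download the set, split it into
+ # overlapping 30-second chunks, identify each chunk, and build a timestamped tracklist.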
+ async def process_dj_set(track_url, progress=gr.Progress()):
+     access_token = get_soundcloud_access_token(client_id, client_secret)
+     headers = {
+         'Authorization': 'Bearer ' + access_token
+     }
+     resolve_response = requests.get(f'https://api.soundcloud.com/resolve.json?url={track_url}', headers=headers)
+     if resolve_response.status_code != 200:
+         return "Failed to resolve track URL.", None
+     track_data = resolve_response.json()
+     streaming_url = track_data['stream_url']
+     download_audio(streaming_url, 'track.wav', headers)
+     audio = AudioSegment.from_wav('track.wav')
+     chunk_duration = 30000  # 30 seconds
+     overlap = 10000  # 10 seconds
+     chunks = []
+     start = 0
+     while start + chunk_duration < len(audio):
+         end = start + chunk_duration
+         chunk = audio[start:end]
+         chunks.append((start, chunk))
+         start += chunk_duration - overlap
+     shazam = shazamio.Shazam()
+     tracklist = []
+     for start_time, chunk in chunks:
+         progress(0.1)
+         track_info = await identify_track(shazam, chunk)
+         if track_info:
+             timestamp = time.strftime("%M:%S", time.gmtime(start_time / 1000))
+             tracklist.append(f"{timestamp} - {track_info['title']} by {track_info['subtitle']}")
+     tracklist_output = "\n".join(tracklist)
+     # Write the tracklist to a file so the gr.File output can serve it for download
+     download_path = 'tracklist.txt'
+     with open(download_path, 'w') as f:
+         f.write(tracklist_output)
+     return tracklist_output, download_path

  css = """
  #col-container {
 
  with gr.Blocks(css=css) as demo:
      with gr.Column(elem_id="col-container"):
+         gr.Markdown("# SoundCloud DJ Set Track Identifier")

          with gr.Row():
+             track_url = gr.Text(
+                 label="SoundCloud DJ Set URL",
                  show_label=False,
                  max_lines=1,
+                 placeholder="Enter SoundCloud DJ set URL",
                  container=False,
              )

+             run_button = gr.Button("Process", scale=0, variant="primary")

+         result = gr.Textbox(label="Tracklist", show_label=False)

+         with gr.Accordion("Download Tracklist", open=False):
+             download_button = gr.File(label="Download")  # populated with tracklist.txt by process_dj_set

+         gr.Examples(examples=["https://soundcloud.com/your-track-url"], inputs=[track_url])

+     run_button.click(process_dj_set, inputs=track_url, outputs=[result, download_button])

  if __name__ == "__main__":
+     demo.launch()