fffiloni committed on
Commit
de7981a
β€’
1 Parent(s): 4bc845a

show Llama prompt + share button

Browse files
Files changed (1) hide show
  1. app.py +113 -11
app.py CHANGED
@@ -9,6 +9,7 @@ from gradio_client import Client
9
  client = Client("https://fffiloni-test-llama-api.hf.space/", hf_token=hf_token)
10
  lyrics_client = Client("https://fffiloni-music-to-lyrics.hf.space/")
11
 
 
12
 
13
  from compel import Compel, ReturnedEmbeddingsType
14
  from diffusers import DiffusionPipeline
@@ -92,7 +93,7 @@ def infer(audio_file, has_lyrics):
92
 
93
  Here's the music description :
94
 
95
- « {cap_result} »
96
 
97
  And here are the lyrics :
98
 
@@ -109,7 +110,7 @@ def infer(audio_file, has_lyrics):
109
 
110
  Here's the music description :
111
 
112
- « {cap_result} »
113
  """
114
  print("""———
115
  Calling Llama2 ...
@@ -123,7 +124,7 @@ def infer(audio_file, has_lyrics):
123
 
124
  print(f"Llama2 result: {result}")
125
 
126
- # ———
127
  print("""———
128
  Calling SD-XL ...
129
  """)
@@ -134,10 +135,83 @@ def infer(audio_file, has_lyrics):
134
  print("Finished")
135
 
136
  #return cap_result, result, images
137
- return images, result, gr.update(visible=True)
138
 
139
  css = """
140
- #col-container {max-width: 510px; margin-left: auto; margin-right: auto;}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
141
  """
142
  with gr.Blocks(css=css) as demo:
143
  with gr.Column(elem_id="col-container"):
@@ -161,23 +235,51 @@ with gr.Blocks(css=css) as demo:
161
  Note: Only the first 30 seconds of your audio will be used for inference.
162
  </p>
163
  </div>""")
 
164
  audio_input = gr.Audio(label="Music input", type="filepath", source="upload")
165
- has_lyrics = gr.Radio(label="Does your audio has lyrics ?", choices=["Yes", "No"], value="No", info="If yes, the image should reflect the lyrics, but be aware that because we add a step (getting lyrics), inference will take more time.")
 
 
166
  infer_btn = gr.Button("Generate Image from Music")
167
  #lpmc_cap = gr.Textbox(label="Lp Music Caps caption")
168
- llama_trans_cap = gr.Textbox(label="Llama translation", visible=False)
169
- img_result = gr.Image(label="Image Result")
170
- tryagain_btn = gr.Button("Try another image ?", visible=False)
 
 
 
 
 
 
171
 
172
  gr.Examples(examples=[["./examples/electronic.mp3", "No"],["./examples/folk.wav", "No"], ["./examples/orchestra.wav", "No"]],
173
  fn=infer,
174
  inputs=[audio_input, has_lyrics],
175
- outputs=[img_result, llama_trans_cap, tryagain_btn],
176
  cache_examples=True
177
  )
178
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
179
  #infer_btn.click(fn=infer, inputs=[audio_input], outputs=[lpmc_cap, llama_trans_cap, img_result])
180
- infer_btn.click(fn=infer, inputs=[audio_input, has_lyrics], outputs=[img_result, llama_trans_cap, tryagain_btn])
 
181
  tryagain_btn.click(fn=solo_xd, inputs=[llama_trans_cap], outputs=[img_result])
182
 
183
  demo.queue(max_size=20).launch()
 
9
  client = Client("https://fffiloni-test-llama-api.hf.space/", hf_token=hf_token)
10
  lyrics_client = Client("https://fffiloni-music-to-lyrics.hf.space/")
11
 
12
+ from share_btn import community_icon_html, loading_icon_html, share_js
13
 
14
  from compel import Compel, ReturnedEmbeddingsType
15
  from diffusers import DiffusionPipeline
 
93
 
94
  Here's the music description :
95
 
96
+ « {cap_result} »
97
 
98
  And here are the lyrics :
99
 
 
110
 
111
  Here's the music description :
112
 
113
+ « {cap_result} »
114
  """
115
  print("""———
116
  Calling Llama2 ...
 
124
 
125
  print(f"Llama2 result: {result}")
126
 
127
+ # ———
128
  print("""———
129
  Calling SD-XL ...
130
  """)
 
135
  print("Finished")
136
 
137
  #return cap_result, result, images
138
+ return images, result, gr.update(visible=True), gr.Group.update(visible=True)
139
 
140
  css = """
141
+ #col-container {max-width: 780px; margin-left: auto; margin-right: auto;}
142
+ a {text-decoration-line: underline; font-weight: 600;}
143
+ .animate-spin {
144
+ animation: spin 1s linear infinite;
145
+ }
146
+ @keyframes spin {
147
+ from {
148
+ transform: rotate(0deg);
149
+ }
150
+ to {
151
+ transform: rotate(360deg);
152
+ }
153
+ }
154
+ #share-btn-container {
155
+ display: flex;
156
+ padding-left: 0.5rem !important;
157
+ padding-right: 0.5rem !important;
158
+ background-color: #000000;
159
+ justify-content: center;
160
+ align-items: center;
161
+ border-radius: 9999px !important;
162
+ max-width: 13rem;
163
+ }
164
+ div#share-btn-container > div {
165
+ flex-direction: row;
166
+ background: black;
167
+ align-items: center;
168
+ }
169
+ #share-btn-container:hover {
170
+ background-color: #060606;
171
+ }
172
+ #share-btn {
173
+ all: initial;
174
+ color: #ffffff;
175
+ font-weight: 600;
176
+ cursor:pointer;
177
+ font-family: 'IBM Plex Sans', sans-serif;
178
+ margin-left: 0.5rem !important;
179
+ padding-top: 0.5rem !important;
180
+ padding-bottom: 0.5rem !important;
181
+ right:0;
182
+ }
183
+ #share-btn * {
184
+ all: unset;
185
+ }
186
+ #share-btn-container div:nth-child(-n+2){
187
+ width: auto !important;
188
+ min-height: 0px !important;
189
+ }
190
+ #share-btn-container .wrap {
191
+ display: none !important;
192
+ }
193
+ #share-btn-container.hidden {
194
+ display: none!important;
195
+ }
196
+ .footer {
197
+ margin-bottom: 45px;
198
+ margin-top: 10px;
199
+ text-align: center;
200
+ border-bottom: 1px solid #e5e5e5;
201
+ }
202
+ .footer>p {
203
+ font-size: .8rem;
204
+ display: inline-block;
205
+ padding: 0 10px;
206
+ transform: translateY(10px);
207
+ background: white;
208
+ }
209
+ .dark .footer {
210
+ border-color: #303030;
211
+ }
212
+ .dark .footer>p {
213
+ background: #0b0f19;
214
+ }
215
  """
216
  with gr.Blocks(css=css) as demo:
217
  with gr.Column(elem_id="col-container"):
 
235
  Note: Only the first 30 seconds of your audio will be used for inference.
236
  </p>
237
  </div>""")
238
+
239
  audio_input = gr.Audio(label="Music input", type="filepath", source="upload")
240
+ with gr.Row():
241
+ has_lyrics = gr.Radio(label="Does your audio has lyrics ?", choices=["Yes", "No"], value="No", info="If yes, the image should reflect the lyrics, but be aware that because we add a step (getting lyrics), inference will take more time.")
242
+ song_title = gr.Textbox(label="Song Title", value="Title: ", interactive=True, info="If you want to share your result, please provide the title of your audio sample :)", elem_id="song-title")
243
  infer_btn = gr.Button("Generate Image from Music")
244
  #lpmc_cap = gr.Textbox(label="Lp Music Caps caption")
245
+ with gr.Row():
246
+ llama_trans_cap = gr.Textbox(label="Llama Image Suggestion", placeholder="Llama2 image prompt suggestion will be displayed here ;)", visible=True, lines=12, elem_id="llama-prompt")
247
+ img_result = gr.Image(label="Image Result", elem_id="image-out")
248
+ with gr.Row():
249
+ tryagain_btn = gr.Button("Try another image ?", visible=False)
250
+ with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
251
+ community_icon = gr.HTML(community_icon_html)
252
+ loading_icon = gr.HTML(loading_icon_html)
253
+ share_button = gr.Button("Share to community", elem_id="share-btn")
254
 
255
  gr.Examples(examples=[["./examples/electronic.mp3", "No"],["./examples/folk.wav", "No"], ["./examples/orchestra.wav", "No"]],
256
  fn=infer,
257
  inputs=[audio_input, has_lyrics],
258
+ outputs=[img_result, llama_trans_cap, tryagain_btn, share_group],
259
  cache_examples=True
260
  )
261
 
262
+ gr.HTML("""
263
+ <div class="footer">
264
+ <p>
265
+ Music to Image Demo by 🤗 <a href="https://twitter.com/fffiloni" target="_blank">Sylvain Filoni</a>
266
+ </p>
267
+ </div>
268
+ <div id="may-like-container" style="display: flex;justify-content: center;flex-direction: column;align-items: center;">
269
+ <p style="font-size: 0.8em;margin-bottom: 4px;">You may also like: </p>
270
+ <div id="may-like" style="display:flex; align-items:center; justify-content: center;height:20px;">
271
+ <svg height="20" width="182" style="margin-left:4px">
272
+ <a href="https://huggingface.co/spaces/fffiloni/Music-To-Zeroscope" target="_blank">
273
+ <image href="https://img.shields.io/badge/🤗 Spaces-Music To Zeroscope-blue" src="https://img.shields.io/badge/🤗 Spaces-Music To Zeroscope-blue.png" height="20"/>
274
+ </a>
275
+ </svg>
276
+ </div>
277
+ </div>
278
+ """)
279
+
280
  #infer_btn.click(fn=infer, inputs=[audio_input], outputs=[lpmc_cap, llama_trans_cap, img_result])
281
+ infer_btn.click(fn=infer, inputs=[audio_input, has_lyrics], outputs=[img_result, llama_trans_cap, tryagain_btn, share_group])
282
+ share_button.click(None, [], [], _js=share_js)
283
  tryagain_btn.click(fn=solo_xd, inputs=[llama_trans_cap], outputs=[img_result])
284
 
285
  demo.queue(max_size=20).launch()