Update app.py
app.py CHANGED
@@ -4,6 +4,7 @@ lpmc_client = gr.load("seungheondoh/LP-Music-Caps-demo", src="spaces")
 from gradio_client import Client
 
 client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
+sd = gr.load("runwayml/stable-diffusion-v1-5", src="spaces")
 
 from pydub import AudioSegment
 
@@ -47,7 +48,7 @@ def infer(audio_file):
     llama_q = f"""
 
     I'll give you music description, then i want you to provide an illustrative image description that would fit well with the music.
-    Answer with only one image description. Never do lists.
+    Answer with only one image description. Never do lists.
 
     Here's the music description :
 
@@ -61,8 +62,10 @@ def infer(audio_file):
     )
 
     print(result)
+
+    image = sd(result, fn_index=0)[0]
 
-    return cap_result, result
+    return cap_result, result, image
 
 with gr.Blocks() as demo:
     with gr.Column(elem_id="col-container"):
@@ -72,6 +75,6 @@ with gr.Blocks() as demo:
         llama_trans_cap = gr.Textbox(label="Llama translation")
         img_result = gr.Video(label="Result")
 
-        infer_btn.click(fn=infer, inputs=[audio_input], outputs=[lpmc_cap, llama_trans_cap])
+        infer_btn.click(fn=infer, inputs=[audio_input], outputs=[lpmc_cap, llama_trans_cap, img_result])
 
 demo.queue().launch()
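Read together, the three additions load runwayml/stable-diffusion-v1-5 as a callable Space, generate an image from Llama's description inside infer(), and forward that image as a third output. A minimal sketch of the resulting flow follows; the gr.load()/Client() lines and the sd(...) call are taken from the diff, while the lpmc_client call shape and the Llama endpoint name are assumptions about parts of app.py the hunks do not show.

# Sketch only: call shapes marked "assumed" are not confirmed by the diff.
import gradio as gr
from gradio_client import Client

lpmc_client = gr.load("seungheondoh/LP-Music-Caps-demo", src="spaces")
client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
sd = gr.load("runwayml/stable-diffusion-v1-5", src="spaces")

def infer(audio_file):
    # Caption the audio with LP-Music-Caps (call shape assumed).
    cap_result = lpmc_client(audio_file)

    # Ask Llama 2 for a single image description of the music.
    llama_q = f"""
    I'll give you music description, then i want you to provide an illustrative image description that would fit well with the music.
    Answer with only one image description. Never do lists.

    Here's the music description :
    {cap_result}
    """
    result = client.predict(llama_q, api_name="/predict")  # endpoint name assumed

    # New in this commit: render the description with Stable Diffusion.
    # fn_index=0 targets the Space's first API function; the call returns a
    # gallery of images, so [0] takes the first one.
    image = sd(result, fn_index=0)[0]

    return cap_result, result, image

With the extra return value, the updated infer_btn.click(...) line maps (cap_result, result, image) onto (lpmc_cap, llama_trans_cap, img_result).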