import os

import gradio as gr
import torch
from datasets import load_dataset
from diffusers import DiffusionPipeline
from transformers import (
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

MY_SECRET_TOKEN = os.environ.get("HF_TOKEN_SD")
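# Use the GPU when available; the fp16 revision loaded below is intended for CUDA devices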
device = "cuda" if torch.cuda.is_available() else "cpu"
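# Load the pre-trained openai/whisper-small model and processor for speech-to-text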
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
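# Stable Diffusion v1-4 wrapped in the "speech_to_image_diffusion" community pipeline:
# Whisper transcribes the audio and the transcription is used as the diffusion prompt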
diffuser_pipeline = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",
    speech_model=model,
    speech_processor=processor,
    use_auth_token=MY_SECRET_TOKEN,
    revision="fp16",
    torch_dtype=torch.float16,
)
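# Attention slicing lowers peak VRAM usage at a small cost in speed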
diffuser_pipeline.enable_attention_slicing()
diffuser_pipeline = diffuser_pipeline.to(device)
# --------------------------------------------
# TESTING WITH DATASET
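# A fixed LibriSpeech dummy sample; its audio array is currently what gets fed to the
# pipeline instead of the live microphone recording (see speech_to_text below)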
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio_sample = ds[3]
text = audio_sample["text"].lower()
speech_data = audio_sample["audio"]["array"]
# --------------------------------------------
# GRADIO SETUP
title = "Speech to Diffusion • Community Pipeline"
description = """
<p style='text-align: center;'>This demo generates an image from an audio sample, using the pre-trained OpenAI whisper-small model together with Stable Diffusion.<br />
Community pipelines include both inference and training examples contributed by the community.<br />
<a href='https://github.com/huggingface/diffusers/tree/main/examples/community#speech-to-image' target='_blank'> Click here for more information about community pipelines </a>
</p>
"""
audio_input = gr.Audio(source="microphone", type="numpy")
image_output = gr.Image()
def speech_to_text(audio_sample):
    # audio_sample from gr.Audio arrives as a (sample_rate, numpy array) tuple
    print(f"""
    --------
    audio sample: {audio_sample}
    audio array: {audio_sample[1]}
    --------
    """)

    # NOTE: the pipeline currently runs on the fixed dataset sample (speech_data)
    # rather than the recorded audio; pass audio_sample[1] to use the microphone input.
    output = diffuser_pipeline(speech_data)

    print(f"""
    --------
    output: {output}
    --------
    """)
    return output.images[0]
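# Wire the audio input and image output into a simple Gradio Interface and launch the demo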
demo = gr.Interface(fn=speech_to_text, inputs=audio_input, outputs=image_output, title=title, description=description)
demo.launch()