import torch
import os
import random
import gradio as gr
from transformers import pipeline
import base64
from datasets import load_dataset
from diffusers import DiffusionPipeline
from huggingface_hub import login
import numpy as np
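# Inference helpers: one function per demo tab.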
def guessanImage(model, image):
    # Classify the uploaded image with the selected image-classification model.
    imgclassifier = pipeline("image-classification", model=model)
    if image is not None:
        description = imgclassifier(image)
        return description
def guessanAge(model, image):
    # Estimate the age range shown in the image with the selected age classifier.
    imgclassifier = pipeline("image-classification", model=model)
    if image is not None:
        description = imgclassifier(image)
        return description
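# Note: both classifier helpers return the raw pipeline output, a list of
# {"label": ..., "score": ...} dicts, which the "text" output renders as a string.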
def text2speech(model, text, voice):
    # Synthesise speech from text with the selected TTS model and speaker voice.
    if len(text) > 0:
        synthesiser = pipeline("text-to-speech", model=model)
        # SpeechT5 needs a speaker x-vector; fetch one from the CMU ARCTIC embeddings dataset.
        embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
        speaker_embedding = torch.tensor(embeddings_dataset[voice]["xvector"]).unsqueeze(0)
        speech = synthesiser(text, forward_params={"speaker_embeddings": speaker_embedding})
        # Convert the float32 waveform to 16-bit PCM for the Gradio Audio component.
        audio_data = np.frombuffer(speech["audio"], dtype=np.float32)
        audio_data_16bit = (audio_data * 32767).astype(np.int16)
        return speech["sampling_rate"], audio_data_16bit
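# Example (untested sketch): the helper can also be called outside Gradio, e.g.
#   sr, audio = text2speech("microsoft/speecht5_tts", "Hello world", 2271)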
radio1 = gr.Radio(["microsoft/resnet-50", "google/vit-base-patch16-224", "apple/mobilevit-small"], label="Select a Classifier", info="Image Classifier")
tab1 = gr.Interface(
    fn=guessanImage,
    inputs=[radio1, gr.Image(type="pil")],
    outputs=["text"],
)
radio2 = gr.Radio(["nateraw/vit-age-classifier"], label="Select an Age Classifier", info="Age Classifier")
tab2 = gr.Interface(
    fn=guessanAge,
    inputs=[radio2, gr.Image(type="pil")],
    outputs=["text"],
)
radio3 = gr.Radio(["microsoft/speecht5_tts"], label="Select a TTS Model", info="Text-to-Speech")
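# The voice values below are row indices into Matthijs/cmu-arctic-xvectors (validation split);
# each index points at an x-vector for one CMU ARCTIC speaker (awb, bdl, clb, jmk, ksp, rms, slt).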
radio3_1 = gr.Radio(
    [
        ("Scottish male (awb)", 0),
        ("US male (bdl)", 1138),
        ("US female (clb)", 2271),
        ("Canadian male (jmk)", 3403),
        ("Indian male (ksp)", 4535),
        ("US male (rms)", 5667),
        ("US female (slt)", 6799),
    ],
    value=2271,
    label="Select a Voice",
)
tab3 = gr.Interface(
    fn=text2speech,
    inputs=[radio3, "text", radio3_1],
    outputs=["audio"],
)
demo = gr.TabbedInterface([tab1, tab2, tab3], ["Image Classifier", "Age Classifier", "Text to Speech"])
demo.launch()
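# When running locally instead of on Spaces, demo.launch(share=True) would additionally
# create a temporary public link.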