# msVision_3 / app.py
import gradio as gr
from transformers import pipeline, VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
from gradio_client import Client  # assumption: the gradio_client library is available
import torch
from PIL import Image
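
# Load the ViT-GPT2 image-captioning model together with its image processor and tokenizer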
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
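
# Generation settings: captions of at most 16 tokens, decoded with 4-beam search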
max_length = 16
num_beams = 4
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}

def predict_step(image_paths):
    images = []
    for image_path in image_paths:
        i_image = Image.open(image_path)
        if i_image.mode != "RGB":
            i_image = i_image.convert(mode="RGB")
        images.append(i_image)

    pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values
    pixel_values = pixel_values.to(device)
    output_ids = model.generate(pixel_values, **gen_kwargs)
    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    preds = [pred.strip() for pred in preds]
    return preds
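
# Hedged usage sketch (kept as a comment so nothing runs at import time);
# the filename is only illustrative:
#   captions = predict_step(["dog.jpg"])  # -> e.g. ["a dog sitting in the grass"]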

# Load the image-classification pipeline
image_model = pipeline("image-classification", model="google/vit-base-patch16-224")

def generate_music(prompt):
    # Generate music by calling the AudioLDM2 text-to-audio / text-to-music Space
    client = Client("https://haoheliu-audioldm2-text2audio-text2music.hf.space/")
    result = client.predict(
        prompt,          # str in 'Input text' Textbox component
        "Low quality.",  # str in 'Negative prompt' Textbox component
        5,               # int | float (numeric value between 5 and 15) in 'Duration (seconds)' Slider component
        5.5,             # int | float (numeric value between 0 and 7) in 'Guidance scale' Slider component
        5,               # int | float in 'Seed' Number component
        3,               # int | float (numeric value between 1 and 5) in 'Number waveforms to generate' Slider component
        fn_index=1
    )
    print(result)
    # audio_result = extract_audio(result)
    return result
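
# The commented-out extract_audio call above suggests a helper like the sketch
# below. This is a hypothetical implementation: it assumes the Space returns
# either a single audio filepath or a list/tuple whose first element is the
# audio, which may not match the actual AudioLDM2 Space output.
def extract_audio(result):
    # If several outputs came back, take the first one as the audio track.
    if isinstance(result, (list, tuple)):
        return result[0]
    return result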

def generate_voice(prompt):
    # Generate audio from text using the Tango Space
    client = Client("https://declare-lab-tango.hf.space/")
    result = client.predict(
        prompt,  # use the image-classification result as the prompt
        100,     # Steps
        1,       # Guidance Scale
        api_name="/predict"  # API endpoint path
    )
    # Post-process the Tango result here if needed,
    # e.g. extract the audio file URL or data from result
    return result
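
# Note: whether the Space returns a filepath or a (sample_rate, data) tuple
# depends on its API, so the result is passed to gr.Audio unmodified below.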

def classify_and_generate_voice(uploaded_image):
    # Classify the image
    predictions = image_model(uploaded_image)
    top_prediction = predictions[0]['label']  # highest-confidence label

    # Generate voice
    voice_result = generate_voice("this is " + top_prediction)

    # Generate music
    music_result = generate_music("The rnb beat of 85BPM drums. " + top_prediction + ".")

    # Hand the voice and music results to the Gradio interface,
    # e.g. voice_result['url'] or voice_result['audio_data']
    return top_prediction, voice_result, music_result
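
# The three return values map one-to-one onto the outputs below:
# gr.Label (classification), gr.Audio (voice), gr.Audio (music).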

# Create the Gradio interface
iface = gr.Interface(
    fn=classify_and_generate_voice,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Label(), gr.Audio(), gr.Audio()],
    title="msVision_3",
    description="Upload an image: the app recognizes the object and generates matching voice and music.",
    examples=["dog.jpg", "cat.png", "cafe.jpg"]
)

# Launch the interface
iface.launch()