import os

import gradio as gr
import mdtex2html
import numpy as np
import soundfile as sf
import torch
import whisper
# ctransformers and transformers both export AutoModelForCausalLM, so the GGML
# loader is imported under an alias to avoid shadowing the transformers class.
from ctransformers import AutoModelForCausalLM as CTAutoModelForCausalLM
from diffusers import DiffusionPipeline
from PIL import Image
from transformers import (
    AutoModelForCausalLM,
    AutoProcessor,
    AutoTokenizer,
    MusicgenForConditionalGeneration,
    VitsModel,
    set_seed,
)

from model.bart import BartCaptionModel
from utils.audio_utils import load_audio, STR_CH_FIRST
def image_grid(imgs, rows, cols):
    assert len(imgs) == rows * cols
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def save_to_txt(text_to_save):
    with open('prompt.txt', 'w', encoding='utf-8') as f:
        f.write(text_to_save)

def read_txt():
    with open('prompt.txt', encoding='utf-8') as f:
        return f.readlines()
##### Model loading ####
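# Sampling/stop settings intended for the GGML chat model (defined here but not wired in below).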
params = {
    "max_new_tokens": 512,
    "stop": ["<end>", "<|endoftext|>", "[", "<user>"],
    "temperature": 0.7,
    "top_p": 0.8,
    "stream": True,
    "batch_size": 8,
}
whisper_model = whisper.load_model("medium").to("cuda")
print("Whisper Loaded!")

llm = CTAutoModelForCausalLM.from_pretrained("Aspik101/trurl-2-7b-pl-instruct_GGML", model_type="llama")
print("LLM Loaded!")

tts_model = VitsModel.from_pretrained("facebook/mms-tts-pol")
tts_model.to("cuda")
print("TTS Loaded!")
tts_tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-pol")
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
).to("cuda")
print("DiffusionPipeline Loaded!")
model_audio_gen = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small").to("cuda")
processor_audio_gen = AutoProcessor.from_pretrained("facebook/musicgen-small")
##### Chat (Qwen-7B-Chat) ####
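# The chat tab streams answers from Qwen-7B-Chat via chat_stream; voice input is
# transcribed with Whisper and the reply is spoken back through MMS-TTS.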
def _load_model_tokenizer():
    model_id = 'tangger/Qwen-7B-Chat'
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", trust_remote_code=True, fp16=True).eval()
    return model, tokenizer

model, tokenizer = _load_model_tokenizer()
# Markdown/LaTeX-aware postprocess in the style of the Qwen chat demo (uses
# mdtex2html); it is defined here but not attached to gr.Chatbot.
def postprocess(self, y):
    if y is None:
        return []
    for i, (message, response) in enumerate(y):
        y[i] = (
            None if message is None else mdtex2html.convert(message),
            None if response is None else mdtex2html.convert(response),
        )
    return y
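# Escape HTML-sensitive characters and wrap fenced code blocks so the model
# output renders correctly inside the gr.Chatbot component.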
def _parse_text(text):
    lines = text.split("\n")
    lines = [line for line in lines if line != ""]
    count = 0
    for i, line in enumerate(lines):
        if "```" in line:
            count += 1
            items = line.split("`")
            if count % 2 == 1:
                lines[i] = f'<pre><code class="language-{items[-1]}">'
            else:
                lines[i] = "<br></code></pre>"
        else:
            if i > 0:
                if count % 2 == 1:
                    line = line.replace("`", r"\`")
                    line = line.replace("<", "&lt;")
                    line = line.replace(">", "&gt;")
                    line = line.replace(" ", "&nbsp;")
                    line = line.replace("*", "&ast;")
                    line = line.replace("_", "&lowbar;")
                    line = line.replace("-", "&#45;")
                    line = line.replace(".", "&#46;")
                    line = line.replace("!", "&#33;")
                    line = line.replace("(", "&#40;")
                    line = line.replace(")", "&#41;")
                    line = line.replace("$", "&#36;")
                lines[i] = "<br>" + line
    text = "".join(lines)
    return text
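# Text path: stream partial Qwen responses into the chatbot as they arrive.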
def predict_text(_query, _chatbot, _task_history):
    print(f"User: {_parse_text(_query)}")
    _chatbot.append((_parse_text(_query), ""))
    full_response = ""
    for response in model.chat_stream(tokenizer, _query, history=_task_history,
                                      system="JesteΕ› asystentem AI. Odpowiadaj zawsze w jΔ™zyku polskim."):
        _chatbot[-1] = (_parse_text(_query), _parse_text(response))
        yield _chatbot
        full_response = _parse_text(response)
    print(f"History: {_task_history}")
    _task_history.append((_query, full_response))
    print(f"Qwen-7B-Chat: {_parse_text(full_response)}")
def read_text(text):
    print("___Text to read aloud!")
    inputs = tts_tokenizer(text, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = tts_model(**inputs).waveform.squeeze().cpu().numpy()
    sf.write('temp_file.wav', output, tts_model.config.sampling_rate)
    return 'temp_file.wav'
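# Called after the audio-chat handler finishes so the Audio component reloads
# the freshly written temp_file.wav.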
def update_audio(text):
    return 'temp_file.wav'
def translate(audio):
    print("__Sending the recording to Whisper!")
    transcription = whisper_model.transcribe(audio, language="pl")
    return transcription["text"]
def predict_audio(audio, _chatbot, _task_history):
    # Use translate() to turn the recording into text first.
    _query = translate(audio)
    print(f"____User: {_parse_text(_query)}")
    _chatbot.append((_parse_text(_query), ""))
    full_response = ""
    for response in model.chat_stream(tokenizer,
                                      _query,
                                      history=_task_history,
                                      system="JesteΕ› asystentem AI. Odpowiadaj zawsze w jΔ™zyku polskim. Odpowiadaj krΓ³tko."):
        _chatbot[-1] = (_parse_text(_query), _parse_text(response))
        yield _chatbot
        full_response = _parse_text(response)
    print(f"____History: {_task_history}")
    _task_history.append((_query, full_response))
    print(f"__Qwen-7B-Chat: {_parse_text(full_response)}")
    print("____full_response", full_response)
    read_text(_parse_text(full_response))  # synthesize the reply to temp_file.wav
    return full_response
    # return 'temp_file.wav'  # return the path to the audio file
def regenerate(_chatbot, _task_history):
    if not _task_history:
        yield _chatbot
        return
    item = _task_history.pop(-1)
    _chatbot.pop(-1)
    yield from predict_text(item[0], _chatbot, _task_history)
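# Chat UI: a text box and a microphone input share the same chatbot and history;
# the synthesized reply is exposed through the Audio component.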
with gr.Blocks() as chat_demo:
    chatbot = gr.Chatbot(label='Llama Voice Chatbot', elem_classes="control-height")
    query = gr.Textbox(lines=2, label='Input')
    task_history = gr.State([])
    audio_output = gr.Audio('temp_file.wav', label="Generated Audio (wav)", type='filepath', autoplay=False)

    with gr.Row():
        submit_btn = gr.Button("πŸš€ WyΕ›lij tekst")
    with gr.Row():
        audio_upload = gr.Audio(source="microphone", type="filepath", show_label=False)
        submit_audio_btn = gr.Button("πŸŽ™οΈ WyΕ›lij audio")

    submit_btn.click(predict_text, [query, chatbot, task_history], [chatbot], show_progress=True)
    submit_audio_btn.click(predict_audio, [audio_upload, chatbot, task_history], [chatbot], show_progress=True).then(update_audio, chatbot, audio_output)

# chat_demo is mounted as a tab of demo_all below, so it is not launched separately here.
##### Audio Gen ####
sampling_rate = model_audio_gen.audio_encoder.config.sampling_rate
frame_rate = model_audio_gen.audio_encoder.config.frame_rate
text_encoder = model_audio_gen.get_text_encoder()
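# MusicGen: the text-encoder output is precomputed for the prompt plus a "drums"
# text used as a negative prompt for classifier-free guidance, and the requested
# clip length is converted to decoder tokens via the audio encoder's frame rate.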
def generate_audio(decade, genre, instrument, guidance_scale=8, audio_length_in_s=20, seed=0):
    prompt = " ".join([decade, genre, 'track with ', instrument])
    save_to_txt(prompt)
    inputs = processor_audio_gen(
        text=[prompt, "drums"],
        padding=True,
        return_tensors="pt",
    ).to(device)
    with torch.no_grad():
        encoder_outputs = text_encoder(**inputs)
    max_new_tokens = int(frame_rate * audio_length_in_s)
    set_seed(seed)
    audio_values = model_audio_gen.generate(inputs.input_ids[0][None, :],
                                            attention_mask=inputs.attention_mask,
                                            encoder_outputs=encoder_outputs,
                                            do_sample=True,
                                            guidance_scale=guidance_scale,
                                            max_new_tokens=max_new_tokens)
    sf.write('generated_audio.wav', audio_values.cpu()[0][0], sampling_rate)
    audio_values = (audio_values.cpu().numpy() * 32767).astype(np.int16)
    return (sampling_rate, audio_values)
audio_gen = gr.Interface(
    fn=generate_audio,
    inputs=[
        # gr.Text(label="Negative prompt", value="drums"),
        gr.Radio(["50s", "60s", "70s", "80s", "90s"], label="decade", info=""),
        gr.Radio(["classic", "rock", "pop", "metal", "jazz", "synth"], label="genre", info=""),
        gr.Radio(["acoustic guitar", "electric guitar", "drums", "saxophone", "keyboard", "accordion", "fiddle"], label="instrument", info=""),
        gr.Slider(1.5, 10, value=8, step=0.5, label="Guidance scale"),
        gr.Slider(5, 30, value=20, step=5, label="Audio length in s"),
        # gr.Slider(0, 10, value=0, step=1, label="Seed"),
    ],
    outputs=[
        gr.Audio(label="Generated Music", type="numpy"),
    ],
    # examples=EXAMPLES,
)
#### Audio captioning and Stable Diffusion ###
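# Fetch the LP-MusicCaps transfer checkpoint and example clips on first run.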
if os.path.isfile("transfer.pth") == False:
torch.hub.download_url_to_file('https://huggingface.co/seungheondoh/lp-music-caps/resolve/main/transfer.pth', 'transfer.pth')
torch.hub.download_url_to_file('https://huggingface.co/seungheondoh/lp-music-caps/resolve/main/folk.wav', 'folk.wav')
torch.hub.download_url_to_file('https://huggingface.co/seungheondoh/lp-music-caps/resolve/main/electronic.mp3', 'electronic.mp3')
torch.hub.download_url_to_file('https://huggingface.co/seungheondoh/lp-music-caps/resolve/main/orchestra.wav', 'orchestra.wav')
device = "cuda:0" if torch.cuda.is_available() else "cpu"
example_list = ['folk.wav', 'electronic.mp3', 'orchestra.wav']
model = BartCaptionModel(max_length = 128)
pretrained_object = torch.load('./transfer.pth', map_location='cpu')
state_dict = pretrained_object['state_dict']
model.load_state_dict(state_dict)
if torch.cuda.is_available():
torch.cuda.set_device(device)
model = model.cuda(device)
model.eval()
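# Split the input into 10-second, 16 kHz mono chunks; the captioner produces
# one caption per chunk.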
def get_audio(audio_path, duration=10, target_sr=16000):
    n_samples = int(duration * target_sr)
    audio, sr = load_audio(
        path=audio_path,
        ch_format=STR_CH_FIRST,
        sample_rate=target_sr,
        downmix_to_mono=True,
    )
    if len(audio.shape) == 2:
        audio = audio.mean(0, False)  # to mono
    input_size = int(n_samples)
    if audio.shape[-1] < input_size:  # pad short clips up to one chunk
        pad = np.zeros(input_size)
        pad[: audio.shape[-1]] = audio
        audio = pad
    ceil = int(audio.shape[-1] // n_samples)
    audio = torch.from_numpy(np.stack(np.split(audio[:ceil * n_samples], ceil)).astype('float32'))
    return audio
def caption_audio_file(audio_path):
    # General variant that captions an arbitrary audio file; the UI below uses
    # the no-argument captioning() that works on the last generated clip.
    audio_tensor = get_audio(audio_path=audio_path)
    if torch.cuda.is_available():
        audio_tensor = audio_tensor.to(device)
    with torch.no_grad():
        output = caption_model.generate(
            samples=audio_tensor,
            num_beams=5,
        )
    inference = ""
    number_of_chunks = range(audio_tensor.shape[0])
    for chunk, text in zip(number_of_chunks, output):
        time = f"[{chunk * 10}:00-{(chunk + 1) * 10}:00]"
        inference += f"{time}\n{text} \n \n"
    return inference
title = ""
description = ""
article = ""
def captioning():
    audio_path = 'generated_audio.wav'
    audio_tensor = get_audio(audio_path=audio_path)
    if torch.cuda.is_available():
        audio_tensor = audio_tensor.to(device)
    with torch.no_grad():
        output = caption_model.generate(
            samples=audio_tensor,
            num_beams=5)
    inference = ""
    number_of_chunks = range(audio_tensor.shape[0])
    for chunk, text in zip(number_of_chunks, output):
        time = f"[{chunk * 10}:00-{(chunk + 1) * 10}:00]"
        inference += f"{time}\n{text} \n \n"
    prompt = read_txt()
    print(prompt[0])

    # Generate images from the saved MusicGen prompt plus the caption.
    # generated_images = pipe(prompt=prompt[0]*5 + inference + prompt[0]*5).images
    # image = generated_images[0]
    num_images = 3
    prompt = [prompt[0] * 5 + inference + prompt[0] * 5] * num_images
    images = pipe(prompt, height=768, width=768).images
    grid = image_grid(images, rows=1, cols=3)
    return inference, grid
audio_desc = gr.Interface(fn=captioning,
                          inputs=None,
                          outputs=[
                              gr.Textbox(label="Caption generated by LP-MusicCaps Transfer Model"),
                              gr.Image(label="Generated Image"),  # image rendered from the caption
                          ],
                          title=title,
                          description=description,
                          article=article,
                          cache_examples=False
                          )
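# Pre-recorded demo videos shown as standalone tabs.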
music = gr.Video("muzyka_AI.mp4")
voice_cloning = gr.Video("voice_cloning_fraud.mp4")
##### Run all ####
demo_all = gr.TabbedInterface([music, audio_gen, audio_desc, voice_cloning, chat_demo], ["1.Music", "2.Audio Generation", "3.Image Generation", "4.Voice Cloning", "5.Chat with LLama"])
demo_all.queue()
demo_all.launch()