from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr

# Load pre-trained tokenizer and model
tokenizer = AutoTokenizer.from_pretrained('huggingartists/ed-sheeran')
model = AutoModelForCausalLM.from_pretrained('huggingartists/ed-sheeran', pad_token_id=50269)

# Function to generate predictions
def ed_lyrics(prompt):
    # Encode the prompt and keep the tensor on the CPU
    encoded_prompt = tokenizer.encode(prompt + "\n\nLyrics: ", add_special_tokens=False, return_tensors='pt').to('cpu')
    # Sample up to 75 new tokens beyond the prompt with nucleus sampling
    output_sequences = model.generate(encoded_prompt, max_length=75 + encoded_prompt.shape[1], top_p=0.8, do_sample=True)[0].tolist()
    generated_song = tokenizer.decode(output_sequences, clean_up_tokenization_spaces=True)
    # Keep only the text after the last blank line and return it as a string
    final_result = generated_song.split("\n\n")[-1]
    return final_result

# Launch interactive web demo
iface = gr.Interface(fn=ed_lyrics, inputs="textbox", outputs="text")
iface.launch()
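
# Quick usage sketch (not part of the original script): the generator can also
# be called directly and returns one sampled lyric continuation as a string, e.g.
#   ed_lyrics("I found a love")  # hypothetical prompt, for illustration only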