# Emojinator / app.py
import gradio as gr
from prompt_prefix import prompt_prefix
import re
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load model and tokenizer
# distilgpt2 is only ~80 MB and not an instruction-tuned model, so we steer it
# with few-shot examples from prompt_prefix (or would need to fine-tune it)
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
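
# Note: prompt_prefix (imported above) is assumed to be a list of few-shot
# example strings of the form "<sentence> → <emojis>", e.g. (hypothetical):
#   "I love pizza → 🍕❤️"
#   "Good night → 🌙😴"
# distilgpt2 then only has to continue that pattern for the new input.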

# Conversion function: map an input sentence to a short emoji sequence
def text_to_emoji(input_text):
    # Clean the input text (optional): strip basic punctuation
    cleaned_text = re.sub(r"[.,!?;:]", "", input_text)

    # Few-shot prompt: the example lines from prompt_prefix, then the cleaned
    # input followed by "→" for the model to complete
    prompt = "\n".join(prompt_prefix) + f"\n{cleaned_text} →"

    # Tokenize and generate
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=10,
        do_sample=True,
        temperature=0.9,
        top_k=50,
        pad_token_id=tokenizer.eos_token_id  # avoids the missing pad_token warning
    )

    # Decode and keep only the part generated after the last "→",
    # up to the first newline
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    emoji_part = generated_text.split("→")[-1].strip().split("\n")[0]
    return emoji_part
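
# Quick local sanity check (hypothetical input; output varies from run to run
# because sampling with temperature/top_k is enabled):
# print(text_to_emoji("I am going to the beach"))
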
# Gradio UI
iface = gr.Interface(
    fn=text_to_emoji,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence..."),
    outputs="text",
    title="AI-Powered Emoji Translator",
    description="Enter a sentence, and the AI will transform it into an emoji version 🥳"
)
iface.launch()
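
# On Hugging Face Spaces the plain launch() above is sufficient; when running
# locally, iface.launch(share=True) could be used instead to expose a temporary
# public link (a standard Gradio option, not required by this app).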