# Emojinator / app.py
import gradio as gr
import re
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load model and tokenizer
# openai-community/gpt2 tends to answer with plain text instead of emojis:
#tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
#model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
# distilgpt2 is only ~80 MB
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
# sshleifer/tiny-gpt2 is only ~20 MB, but its output is not usable (NOK)
#tokenizer = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
#model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
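
# Note (an observation, not part of the original app): GPT-2-family tokenizers define
# no pad token, so model.generate() may log a "Setting `pad_token_id` to `eos_token_id`"
# message. If that noise is unwanted, passing pad_token_id=tokenizer.eos_token_id to
# model.generate() silences it; behaviour is otherwise unchanged for single inputs.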

# Conversion: turn an input sentence into a short emoji sequence
def text_to_emoji(input_text):
    # Clean up the input text (optional): strip basic punctuation
    cleaned_text = re.sub(r"[.,!?;:]", "", input_text)

    # Prepare the prompt
    prompt = f'Convert the following sentence into an emoji-sequence which conveys a similar meaning and return only the emojis, no explanation:\n\n"{cleaned_text}"\n\n'

    # Tokenize and generate (sampling keeps the output varied between calls)
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=30,
        do_sample=True,
        temperature=0.8,
        top_k=50,
    )

    # Decode the generated tokens
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Keep only the continuation after the prompt
    emoji_part = generated_text[len(prompt):].strip()
    return emoji_part
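
# Quick local sanity check (illustrative, not part of the original Space): calling the
# function directly shows what the base model produces. As the comments above note for
# gpt2, distilgpt2 was never instruction-tuned, so the reply may be plain text rather
# than emojis.
#   print(text_to_emoji("I love pizza and sunny days"))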

# Gradio UI
iface = gr.Interface(
    fn=text_to_emoji,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence..."),
    outputs="text",
    title="AI-Powered Emoji Translator",
    description="Enter a sentence, and the AI will transform it into an emoji-version 🥳",
)

iface.launch()
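
# To run locally (assuming gradio, transformers, and torch are installed):
#   python app.py
# Gradio then prints a local URL (typically http://127.0.0.1:7860) where the UI is served.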