# NOTE(review): Hugging Face Space status banner captured during export — not code.
# Spaces: Sleeping
import os
import re

import gradio as gr
from huggingface_hub import InferenceClient

# API token is read from the environment (configured as a Space secret).
HF_API_TOKEN = os.getenv("HUG_TOKEN")

# Hugging Face Inference API client.
# Models tried previously: mistralai/Mistral-7B-Instruct-v0.1,
# openGPT-X/Teuken-7B-instruct-commercial-v0.4, and a locally loaded
# Qwen/Qwen2.5-Omni-7B via transformers; Phi-4-mini-instruct is the current choice.
client = InferenceClient(model="microsoft/Phi-4-mini-instruct", token=HF_API_TOKEN)
# Function to translate text into emojis
def text_to_emoji(text):
    """Ask the hosted model to render *text* as an emoji sequence.

    Common punctuation is stripped first so the model sees only the words.
    Returns the raw text_generation response — expected (but not guaranteed)
    to contain only emojis, per the prompt instruction.
    """
    # Remove sentence punctuation; the cleaned text is quoted inside the prompt.
    text_cleaned = re.sub(r"[.,!?;:]", "", text)
    prompt = f"Convert this sentence into an emoji-sequence which conveys a similar meaning and return only the emojis, no explanation:\n\n\"{text_cleaned}\""
    # max_new_tokens bounds the reply length; emoji sequences are short.
    response = client.text_generation(prompt, max_new_tokens=50)
    return response
# Gradio UI: single textbox in, plain text (emoji string) out.
iface = gr.Interface(
    fn=text_to_emoji,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence..."),
    outputs="text",
    title="AI-Powered Emoji Translator",
    description="Enter a sentence, and the AI will transform it into an emoji-version 🥳",
)

# Launched unconditionally: HF Spaces executes this file as the app entry point.
iface.launch()