# AI-Powered Emoji Translator — Hugging Face Spaces app
# (web-page extraction artifacts removed: Space status lines, file size,
#  commit hashes, and gutter line numbers were not part of the source file)
import gradio as gr
from huggingface_hub import InferenceClient
import os
import re
#from transformers import Qwen2_5OmniForConditionalGeneration
#qwenModel = Qwen2_5OmniForConditionalGeneration.from_pretrained(
# "Qwen/Qwen2.5-Omni-7B",
# device_map="auto",
# torch_dtype=torch.bfloat16,
# attn_implementation="flash_attention_2",
#)
# Load the Hugging Face API key from the environment.
# NOTE(review): no fallback if HUG_TOKEN is unset — InferenceClient will then
# run unauthenticated and may be rate-limited or rejected; confirm the Space
# always has the secret configured.
HF_API_TOKEN = os.getenv("HUG_TOKEN")
# Hugging Face Inference API client.
# Earlier model choices kept for reference (Mistral, Teuken, local Qwen):
#client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.1", token=HF_API_TOKEN)
#client = InferenceClient(model="openGPT-X/Teuken-7B-instruct-commercial-v0.4", token=HF_API_TOKEN)
#client = InferenceClient(model=qwenModel, token=HF_API_TOKEN)
client = InferenceClient(model="microsoft/Phi-4-mini-instruct", token=HF_API_TOKEN)
# Function to translate text into emojis
def text_to_emoji(text):
    """Translate *text* into a sequence of emojis via the Inference API.

    Args:
        text: A free-form sentence from the user.

    Returns:
        The model's generated emoji sequence as a string, with surrounding
        whitespace stripped (models frequently echo leading newlines/spaces
        before the emojis).
    """
    # Strip common punctuation so it doesn't leak into the prompt/output.
    text_cleaned = re.sub(r"[.,!?;:]", "", text)
    prompt = f"Convert this sentence into an emoji-sequence which conveys a similar meaning and return only the emojis, no explanation:\n\n\"{text_cleaned}\""
    response = client.text_generation(prompt, max_new_tokens=50)
    # text_generation returns the raw generated string; trim stray whitespace.
    return response.strip()
# Gradio UI
# Gradio UI: a single textbox in, the emoji translation out.
iface = gr.Interface(
    fn=text_to_emoji,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence..."),
    outputs="text",
    title="AI-Powered Emoji Translator",
    description="Enter a sentence, and the AI will transform it into an emoji-version 🥳"
)

# Removed stray trailing " |" (scrape artifact) that made this line a SyntaxError.
iface.launch()