"""Gradio app: translate a sentence into an emoji sequence via the HF Inference API."""

import os
import re

import gradio as gr
from huggingface_hub import InferenceClient

# Load API key from environment variables so no secret is committed to the repo.
HF_API_TOKEN = os.getenv("HUG_TOKEN_READ2")

# Chat-completion client routed through the "nebius" inference provider.
client = InferenceClient(
    provider="nebius",
    api_key=HF_API_TOKEN,
)

# Punctuation stripped from user input before it is embedded in the prompt.
_PUNCTUATION_RE = re.compile(r"[.,!?;:]")


def text_to_emoji(text):
    """Translate *text* into an emoji sequence using a hosted chat model.

    Parameters
    ----------
    text : str
        The sentence entered by the user.

    Returns
    -------
    str
        The model's reply, expected to contain only emojis.
    """
    # Remove punctuation that tends to leak into the model's output.
    text_cleaned = _PUNCTUATION_RE.sub("", text)
    prompt = f"Convert this sentence into an emoji-sequence which conveys a similar meaning and return only the emojis, no explanation:\n\n\"{text_cleaned}\""

    completion = client.chat.completions.create(
        model="mistralai/Mistral-Small-3.1-24B-Instruct-2503",
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": prompt,
                    }
                ],
            }
        ],
        # Keep the reply short: an emoji sequence needs very few tokens.
        max_tokens=25,
    )
    # BUG FIX: the original returned the message *object*; the Gradio "text"
    # output needs the string content, not the object's repr.
    return completion.choices[0].message.content


# Gradio UI
iface = gr.Interface(
    fn=text_to_emoji,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence..."),
    outputs="text",
    title="AI-Powered Emoji Translator",
    description="Enter a sentence, and the AI will transform it into an emoji-version 🥳",
)

iface.launch()