import gradio as gr
from huggingface_hub import InferenceClient
import os
import re
#from transformers import Qwen2_5OmniForConditionalGeneration

#qwenModel = Qwen2_5OmniForConditionalGeneration.from_pretrained(
#    "Qwen/Qwen2.5-Omni-7B",
#    device_map="auto",
#    torch_dtype=torch.bfloat16,
#    attn_implementation="flash_attention_2",
#)

# Load API key from environment variables
HF_API_TOKEN = os.getenv("HUG_TOKEN_READ2")
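# (On Hugging Face Spaces this token would typically be configured as a repository secret.)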

# Hugging Face Inference API Client
#client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.1", token=HF_API_TOKEN)
#client = InferenceClient(model="openGPT-X/Teuken-7B-instruct-commercial-v0.4", token=HF_API_TOKEN)
#client = InferenceClient(model=qwenModel, token=HF_API_TOKEN)
#client = InferenceClient(model="microsoft/Phi-4-mini-instruct", token=HF_API_TOKEN)
#client = InferenceClient(model="openai-community/gpt2", token=HF_API_TOKEN)
client = InferenceClient(
    provider="nebius",
    api_key=HF_API_TOKEN,  # use the token loaded above instead of a hard-coded placeholder
)

# Function to translate text into emojis
def text_to_emoji(text):
    # Remove punctuation so the model sees only the words
    text_cleaned = re.sub(r"[.,!?;:]", "", text)

    prompt = f"Convert this sentence into an emoji-sequence which conveys a similar meaning and return only the emojis, no explanation:\n\n\"{text_cleaned}\""
    #response = client.text_generation(prompt, max_new_tokens=50)
    #return response

    completion = client.chat.completions.create(
        model="mistralai/Mistral-Small-3.1-24B-Instruct-2503",
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": prompt
                    }
                ]
            }
        ],
        max_tokens=25,
    )
    # Return only the generated text, not the whole message object
    return completion.choices[0].message.content

# Gradio UI
iface = gr.Interface(
    fn=text_to_emoji,
    inputs=gr.Textbox(lines=2, placeholder="Enter a sentence..."),
    outputs="text",
    title="AI-Powered Emoji Translator",
    description="Enter a sentence, and the AI will transform it into an emoji-version 🥳"
)

iface.launch()
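
# A minimal sketch of a direct test (assuming HUG_TOKEN_READ2 is set in the environment);
# run it instead of iface.launch(), since launch() blocks:
#   print(text_to_emoji("I love pizza and sunny days"))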