ai01firebird committed (verified)
Commit 8a0b589 · 1 Parent(s): e374794

change to nebius inference

Files changed (1):
  app.py (+25 -3)
app.py CHANGED
@@ -19,7 +19,11 @@ HF_API_TOKEN = os.getenv("HUG_TOKEN_READ2")
 #client = InferenceClient(model="openGPT-X/Teuken-7B-instruct-commercial-v0.4", token=HF_API_TOKEN)
 #client = InferenceClient(model=qwenModel, token=HF_API_TOKEN)
 #client = InferenceClient(model="microsoft/Phi-4-mini-instruct", token=HF_API_TOKEN)
-client = InferenceClient(model="openai-community/gpt2", token=HF_API_TOKEN)
+#client = InferenceClient(model="openai-community/gpt2", token=HF_API_TOKEN)
+client = InferenceClient(
+    provider="nebius",
+    api_key="hf_xxxxxxxxxxxxxxxxxxxxxxxx",
+)
 
 # Function to translate text into emojis
 def text_to_emoji(text):
@@ -27,8 +31,26 @@ def text_to_emoji(text):
     text_cleaned = re.sub(r"[.,!?;:]", "", text)
 
     prompt = f"Convert this sentence into an emoji-sequence which conveys a similar meaning and return only the emojis, no explanation:\n\n\"{text_cleaned}\""
-    response = client.text_generation(prompt, max_new_tokens=50)
-    return response
+    #response = client.text_generation(prompt, max_new_tokens=50)
+    #return response
+
+
+    completion = client.chat.completions.create(
+        model="mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+        messages=[
+            {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": prompt
+                    }
+                ]
+            }
+        ],
+        max_tokens=25,
+    )
+    return completion.choices[0].message
 
 # Gradio UI
 iface = gr.Interface(
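
For reference, a minimal sketch of how the file reads around this change, with two hedged adjustments that are not part of the commit itself: the Nebius client is handed the token already read from the environment (the committed code hard-codes a placeholder hf_xxxxxxxxxxxxxxxxxxxxxxxx key), and the function returns the reply text via .content (the committed code returns the whole message object, which a plain-text Gradio output would display as its repr rather than just the emojis).

import os
import re

from huggingface_hub import InferenceClient

HF_API_TOKEN = os.getenv("HUG_TOKEN_READ2")

# Assumption: reuse the token from the environment instead of a hard-coded placeholder key
client = InferenceClient(provider="nebius", api_key=HF_API_TOKEN)

def text_to_emoji(text):
    # Strip basic punctuation before building the prompt
    text_cleaned = re.sub(r"[.,!?;:]", "", text)
    prompt = (
        "Convert this sentence into an emoji-sequence which conveys a similar "
        f"meaning and return only the emojis, no explanation:\n\n\"{text_cleaned}\""
    )
    completion = client.chat.completions.create(
        model="mistralai/Mistral-Small-3.1-24B-Instruct-2503",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=25,
    )
    # Assumption: return the generated text, not the whole message object
    return completion.choices[0].message.content

The structured content payload in the commit ([{"type": "text", "text": prompt}]) and the plain string shown above should be interchangeable for a text-only prompt; the string form is simply the shorter of the two.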