revert to use Mistral-7B-Instruct-v0.1
app.py CHANGED
@@ -2,22 +2,22 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 import os
 import re
-from transformers import Qwen2_5OmniForConditionalGeneration
+#from transformers import Qwen2_5OmniForConditionalGeneration
 
-qwenModel = Qwen2_5OmniForConditionalGeneration.from_pretrained(
-    "Qwen/Qwen2.5-Omni-7B",
-    device_map="auto",
-    torch_dtype=torch.bfloat16,
-    attn_implementation="flash_attention_2",
-)
+#qwenModel = Qwen2_5OmniForConditionalGeneration.from_pretrained(
+#    "Qwen/Qwen2.5-Omni-7B",
+#    device_map="auto",
+#    torch_dtype=torch.bfloat16,
+#    attn_implementation="flash_attention_2",
+#)
 
 # Load API key from environment variables
 HF_API_TOKEN = os.getenv("HUG_TOKEN_READ")
 
 # Hugging Face Inference API Client
-
+client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.1", token=HF_API_TOKEN)
 #client = InferenceClient(model="openGPT-X/Teuken-7B-instruct-commercial-v0.4", token=HF_API_TOKEN)
-client = InferenceClient(model=qwenModel, token=HF_API_TOKEN)
+#client = InferenceClient(model=qwenModel, token=HF_API_TOKEN)
 
 # Function to translate text into emojis
 def text_to_emoji(text):
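Context on why the revert makes sense: InferenceClient from huggingface_hub expects its model argument to be a Hub model ID string (or an endpoint URL), so the reverted model=qwenModel line, which passed an in-process transformers model object, could not work with the Inference API (the commented-out Qwen block also referenced torch.bfloat16 without importing torch). Below is a minimal sketch of how the restored Mistral client might be used inside text_to_emoji; the prompt wording and max_new_tokens value are illustrative assumptions, since the function body is not shown in this diff.

# Minimal usage sketch; prompt text and generation parameters are
# assumptions for illustration, not taken from the Space's actual code.
from huggingface_hub import InferenceClient
import os

HF_API_TOKEN = os.getenv("HUG_TOKEN_READ")

# InferenceClient takes a Hub model ID string (or endpoint URL), not a
# loaded transformers model object like the reverted qwenModel.
client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.1", token=HF_API_TOKEN)

def text_to_emoji(text):
    # Hypothetical prompt, wrapped in Mistral-Instruct's [INST] format;
    # the Space's real prompt is outside this diff.
    prompt = f"[INST] Translate the following text into emojis only: {text} [/INST]"
    return client.text_generation(prompt, max_new_tokens=64)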