Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -12,6 +12,7 @@ HF_API_KEY = os.getenv("HF_API_KEY")
|
|
12 |
|
13 |
# Model endpoints configuration
|
14 |
MODEL_ENDPOINTS = {
|
|
|
15 |
"Qwen2.5-72B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct",
|
16 |
"Llama3.3-70B-Instruct": "https://api-inference.huggingface.co/models/meta-llama/Llama-3.3-70B-Instruct",
|
17 |
"Qwen2.5-Coder-32B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct",
|
@@ -26,6 +27,10 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> Generator[st
|
|
26 |
|
27 |
# Model-specific prompt formatting with full history
|
28 |
model_prompts = {
|
|
|
|
|
|
|
|
|
29 |
"Qwen2.5-72B-Instruct": (
|
30 |
f"<|im_start|>system\nCollaborate with other experts. Previous discussion:\n{conversation}<|im_end|>\n"
|
31 |
"<|im_start|>assistant\nMy analysis:"
|
@@ -82,8 +87,8 @@ def respond(message: str, history: List[List[str]], session_id: str) -> Generato
|
|
82 |
})
|
83 |
|
84 |
# Model responses
|
85 |
-
model_names = ["Qwen2.5-Coder-32B-Instruct", "Qwen2.5-72B-Instruct", "Llama3.3-70B-Instruct"]
|
86 |
-
model_colors = ["🔵", "🟣", "🟡"]
|
87 |
responses = {}
|
88 |
|
89 |
# Initialize responses
|
|
|
12 |
|
13 |
# Model endpoints configuration
|
14 |
MODEL_ENDPOINTS = {
|
15 |
+
"DeepSeek-R1": "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
|
16 |
"Qwen2.5-72B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct",
|
17 |
"Llama3.3-70B-Instruct": "https://api-inference.huggingface.co/models/meta-llama/Llama-3.3-70B-Instruct",
|
18 |
"Qwen2.5-Coder-32B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct",
|
|
|
27 |
|
28 |
# Model-specific prompt formatting with full history
|
29 |
model_prompts = {
|
30 |
+
"DeepSeek-R1": (
|
31 |
+
f"<|im_start|>system\nProvide foundational analysis of:\n{conversation}<|im_end|>\n"
|
32 |
+
"<|im_start|>assistant\nFoundational perspective:"
|
33 |
+
),
|
34 |
"Qwen2.5-72B-Instruct": (
|
35 |
f"<|im_start|>system\nCollaborate with other experts. Previous discussion:\n{conversation}<|im_end|>\n"
|
36 |
"<|im_start|>assistant\nMy analysis:"
|
|
|
87 |
})
|
88 |
|
89 |
# Model responses
|
90 |
+
model_names = ["DeepSeek-R1", "Qwen2.5-Coder-32B-Instruct", "Qwen2.5-72B-Instruct", "Llama3.3-70B-Instruct"]
|
91 |
+
model_colors = ["🔴", "🔵", "🟣", "🟡"]
|
92 |
responses = {}
|
93 |
|
94 |
# Initialize responses
|