Update app.py
app.py CHANGED
```diff
@@ -26,19 +26,19 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> Generator[st
 
     # Model-specific prompt formatting with full history
     model_prompts = {
+        "Qwen2.5-72B-Instruct": (
+            f"<|im_start|>system\nCollaborate with other experts. Previous discussion:\n{conversation}<|im_end|>\n"
+            "<|im_start|>assistant\nMy analysis:"
+        ),
+        "Llama3.3-70B-Instruct": (
+            "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n"
+            f"Build upon this discussion:\n{conversation}<|eot_id|>\n"
+            "<|start_header_id|>assistant<|end_header_id|>\nMy contribution:"
+        ),
+        "Qwen2.5-Coder-32B-Instruct": (
+            f"<|im_start|>system\nTechnical discussion context:\n{conversation}<|im_end|>\n"
+            "<|im_start|>assistant\nTechnical perspective:"
+        )
     }
 
     client = InferenceClient(base_url=endpoint, token=HF_API_KEY)
```