luminoussg committed
Commit dcc233f · verified · 1 Parent(s): ca344cd

Update app.py

Files changed (1):
  app.py  +13 -13
app.py CHANGED
@@ -26,19 +26,19 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> Generator[st
 
     # Model-specific prompt formatting with full history
     model_prompts = {
-        "Qwen2.5-Coder-32B-Instruct": (
-            f"<|im_start|>system\nProvide detailed technical analysis of:\n{conversation}<|im_end|>\n"
-            "<|im_start|>assistant\nTechnical perspective:"
-        ),
-        "Qwen2.5-72B-Instruct": (
-            f"<|im_start|>system\nBuild upon this technical analysis:\n{conversation}<|im_end|>\n"
-            "<|im_start|>assistant\nCritical analysis:"
-        ),
-        "Llama3.3-70B-Instruct": (
-            "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n"
-            f"Synthesize and refine this analysis:\n{conversation}<|eot_id|>\n"
-            "<|start_header_id|>assistant<|end_header_id|>\nIntegrated perspective:"
-        )
+        "Qwen2.5-72B-Instruct": (
+            f"<|im_start|>system\nCollaborate with other experts. Previous discussion:\n{conversation}<|im_end|>\n"
+            "<|im_start|>assistant\nMy analysis:"
+        ),
+        "Llama3.3-70B-Instruct": (
+            "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n"
+            f"Build upon this discussion:\n{conversation}<|eot_id|>\n"
+            "<|start_header_id|>assistant<|end_header_id|>\nMy contribution:"
+        ),
+        "Qwen2.5-Coder-32B-Instruct": (
+            f"<|im_start|>system\nTechnical discussion context:\n{conversation}<|im_end|>\n"
+            "<|im_start|>assistant\nTechnical perspective:"
+        )
     }
 
     client = InferenceClient(base_url=endpoint, token=HF_API_KEY)
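For context (not part of the commit): a minimal sketch of how one of the new prompt strings could be streamed through the InferenceClient seen in the hunk's context line. The endpoint URL, HF_API_KEY value, and conversation string below are placeholders, and using text_generation is only an assumption about how query_model consumes the prompt.

# Sketch only, not part of this diff; placeholder values are marked.
from huggingface_hub import InferenceClient

endpoint = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct"  # placeholder endpoint
HF_API_KEY = "hf_..."  # placeholder token
conversation = "User: compare caching strategies for embeddings"  # placeholder history

# Prompt built the same way as the new "Qwen2.5-72B-Instruct" entry above
prompt = (
    f"<|im_start|>system\nCollaborate with other experts. Previous discussion:\n{conversation}<|im_end|>\n"
    "<|im_start|>assistant\nMy analysis:"
)

client = InferenceClient(base_url=endpoint, token=HF_API_KEY)

# stream=True yields the generated text chunk by chunk, which is how a
# generator-returning function like query_model could relay partial output.
for chunk in client.text_generation(prompt, max_new_tokens=256, stream=True):
    print(chunk, end="", flush=True)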