luminoussg committed on
Commit
3df9d67
·
verified ·
1 Parent(s): 88b7549

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -15
app.py CHANGED
@@ -12,7 +12,6 @@ HF_API_KEY = os.getenv("HF_API_KEY")
12
 
13
  # Model endpoints configuration
14
  MODEL_ENDPOINTS = {
15
- "DeepSeek-R1": "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
16
  "Qwen2.5-72B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct",
17
  "Llama3.3-70B-Instruct": "https://api-inference.huggingface.co/models/meta-llama/Llama-3.3-70B-Instruct",
18
  "Qwen2.5-Coder-32B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct",
@@ -27,22 +26,35 @@ def query_model(model_name: str, messages: List[Dict[str, str]]) -> Generator[st
27
 
28
  # Model-specific prompt formatting with full history
29
  model_prompts = {
30
- "DeepSeek-R1": (
31
- f"<|im_start|>system\nProvide foundational analysis of:\n{conversation}<|im_end|>\n"
32
- "<|im_start|>assistant\nFoundational perspective:"
 
 
 
 
 
 
33
  ),
34
  "Qwen2.5-72B-Instruct": (
35
- f"<|im_start|>system\nCollaborate with other experts. Previous discussion:\n{conversation}<|im_end|>\n"
36
- "<|im_start|>assistant\nMy analysis:"
 
 
 
 
 
 
37
  ),
38
  "Llama3.3-70B-Instruct": (
39
- "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n"
40
- f"Build upon this discussion:\n{conversation}<|eot_id|>\n"
41
- "<|start_header_id|>assistant<|end_header_id|>\nMy contribution:"
42
- ),
43
- "Qwen2.5-Coder-32B-Instruct": (
44
- f"<|im_start|>system\nTechnical discussion context:\n{conversation}<|im_end|>\n"
45
- "<|im_start|>assistant\nTechnical perspective:"
 
46
  )
47
  }
48
 
@@ -87,8 +99,8 @@ def respond(message: str, history: List[List[str]], session_id: str) -> Generato
87
  })
88
 
89
  # Model responses
90
- model_names = ["DeepSeek-R1", "Qwen2.5-Coder-32B-Instruct", "Qwen2.5-72B-Instruct", "Llama3.3-70B-Instruct"]
91
- model_colors = ["🔴", "🔵", "🟣", "🟑"]
92
  responses = {}
93
 
94
  # Initialize responses
 
12
 
13
  # Model endpoints configuration
14
  MODEL_ENDPOINTS = {
 
15
  "Qwen2.5-72B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct",
16
  "Llama3.3-70B-Instruct": "https://api-inference.huggingface.co/models/meta-llama/Llama-3.3-70B-Instruct",
17
  "Qwen2.5-Coder-32B-Instruct": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B-Instruct",
 
26
 
27
  # Model-specific prompt formatting with full history
28
  model_prompts = {
29
+ "Qwen2.5-Coder-32B-Instruct": (
30
+ "<system>Technical Expert</system>\n"
31
+ "<conversation>\n{conversation}</conversation>\n"
32
+ "<instructions>\n"
33
+ "1. Provide technical implementation details\n"
34
+ "2. Identify potential edge cases\n"
35
+ "3. Suggest code snippets where applicable\n"
36
+ "Format: <technical>{response}</technical>"
37
+ "</instructions>\n"
38
  ),
39
  "Qwen2.5-72B-Instruct": (
40
+ "<system>Analytical Expert</system>\n"
41
+ "<conversation>\n{conversation}</conversation>\n"
42
+ "<instructions>\n"
43
+ "1. Analyze technical suggestions\n"
44
+ "2. Highlight strengths/weaknesses\n"
45
+ "3. Propose alternative approaches\n"
46
+ "Format: <analysis>{response}</analysis>"
47
+ "</instructions>\n"
48
  ),
49
  "Llama3.3-70B-Instruct": (
50
+ "<system>Integration Expert</system>\n"
51
+ "<conversation>\n{conversation}</conversation>\n"
52
+ "<instructions>\n"
53
+ "1. Synthesize previous contributions\n"
54
+ "2. Resolve conflicting suggestions\n"
55
+ "3. Present final integrated solution\n"
56
+ "Format: <synthesis>{response}</synthesis>"
57
+ "</instructions>\n"
58
  )
59
  }
60
 
 
99
  })
100
 
101
  # Model responses
102
+ model_names = ["Qwen2.5-Coder-32B-Instruct", "Qwen2.5-72B-Instruct", "Llama3.3-70B-Instruct"]
103
+ model_colors = ["🔵", "🟣", "🟑"]
104
  responses = {}
105
 
106
  # Initialize responses