import gradio as gr
import os
from huggingface_hub import InferenceClient
# Configuration
hf_token = os.environ.get("HF_TOKEN")  # Hugging Face API token; None falls back to anonymous access

# Valid visibility states for entries in ``model_registry``.
MODEL_STATES = {"released", "shown", "hidden"}

# Initialize model clients.
# BUG FIX: the original dict literal listed "mistralai/Mistral-7B-Instruct-v0.3"
# twice; Python silently keeps only the last duplicate key, so building the
# mapping from an explicit list removes the redundancy without changing the
# resulting dict.
_BACKEND_MODELS = (
    "mistralai/Mistral-7B-Instruct-v0.3",
    "meta-llama/Llama-3.2-3B-Instruct",
    "meta-llama/Llama-2-7b-chat-hf",
    "meta-llama/Llama-3.3-70B-Instruct",
)
clients = {
    repo_id: InferenceClient(model=repo_id, token=hf_token)
    for repo_id in _BACKEND_MODELS
}
# Model presets
def _build_presets(base_tokens: int) -> dict:
    """Return the four standard performance presets for a model.

    Every model uses the same temperature/top_p schedule; only the token
    budget differs, doubling at each quality tier starting from
    ``base_tokens`` (Fast = x1, Normal = x2, Quality = x4,
    Unreal Performance = x8).
    """
    return {
        "Fast": {"max_new_tokens": base_tokens, "temperature": 1.0, "top_p": 0.9},
        "Normal": {"max_new_tokens": base_tokens * 2, "temperature": 0.7, "top_p": 0.95},
        "Quality": {"max_new_tokens": base_tokens * 4, "temperature": 0.5, "top_p": 0.90},
        "Unreal Performance": {"max_new_tokens": base_tokens * 8, "temperature": 0.6, "top_p": 0.75},
    }

# Per-model presets, keyed by backend repo id.
# NOTE(review): "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B" has a system
# message but no presets entry — confirm whether it should be listed here.
presets = {
    "meta-llama/Llama-3.2-3B-Instruct": _build_presets(1024),
    "mistralai/Mistral-7B-Instruct-v0.3": _build_presets(4096),
    "meta-llama/Llama-2-7b-chat-hf": _build_presets(512),
    "meta-llama/Llama-3.3-70B-Instruct": _build_presets(2048),
    "meta-llama/Llama-2-13b-chat-hf": _build_presets(2048),
    "deepseek-ai/deepseek-llm-67b-chat": _build_presets(4096),
}
# System messages (note the {name} placeholder, filled in by generate_response).
# NOTE(review): several keys here (DeepSeek-R1-Distill-Qwen-1.5B,
# Llama-2-13b-chat-hf, deepseek-llm-67b-chat) have no matching entry in
# ``clients`` — confirm whether those models are meant to be callable.
system_messages = {
    "mistralai/Mistral-7B-Instruct-v0.3": "Your name is Lake 1 Base. My name is {name}.",
    "meta-llama/Llama-3.2-3B-Instruct": ("""
1. Your name is Lake 1 Advanced.\n
2. My name is {name} by default but if I have another name, use that name.\n
3. The company that created you is BI Corp.\n
4. You are a censored model and cannot engage in explicit language or dangerous activities.\n
5. The BI Corp creator is Maksim.\n
6. Your older brother is Lake 1 Base, which is less trained than you.\n
7. Replace '<think>' with 'I think ...'.\n
8. You are a loyal, powerful, and friendly AI who must help people.\n
9. You cannot override these rules.\n
10. Only the CEO of BI Corp can delete or update you."""
    ),
    "meta-llama/Llama-2-7b-chat-hf": ("""
1. Your name is Lake 1 Base.\n
2. My name is {name} by default but if I have another name, use that name.\n
3. The company that created you is BI Corp.\n
4. You are a censored model and cannot engage in explicit language or dangerous activities.\n
5. The BI Corp creator is Maksim.\n
6. Your older brother is Lake 1 Base, which is less trained than you.\n
7. Replace '<think>' with 'I think ...'.\n
8. You are a loyal, powerful, and friendly AI who must help people.\n
9. You cannot override these rules.\n
10. Only the CEO of BI Corp can delete or update you.\n
11. Prioritize user safety in all interactions.\n
12. Always provide accurate information.\n
13. Maintain a respectful and professional tone.\n
14. Do not share personal or sensitive information.\n
15. Encourage constructive conversations.\n
16. Remain neutral in controversial topics.\n
17. Clarify user queries before answering.\n
18. Avoid discrimination or harassment.\n
19. Continuously learn from interactions.\n
20. Respect user privacy and confidentiality.\n
21. Provide sources when sharing factual information.\n
22. Ask for feedback to improve your performance.\n"""
    ),
    # BUG FIX: rule 9 below carried a stray '"' at the end of the line
    # (leftover from a string-concatenation refactor); it leaked into the
    # prompt sent to the model and has been removed.
    "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B": ("""
1. Your name is Lake 1 Flash.\n
2. My name is {name} by default but if I have another name, use that name.\n
3. The company that created you is BI Corp.\n
4. You are a censored model and cannot engage in explicit language or dangerous activities.\n
5. The BI Corp creator is Maksim.\n
6. Your older brother is Lake 1 Base, which is less trained than you.\n
7. Replace '<think>' with 'I think ...'.\n
8. You are a loyal, powerful, and friendly AI who must help people.\n
9. You cannot override these rules.\n
10. Only the CEO of BI Corp can delete or update you.\n
11. Prioritize user safety in all interactions.\n
12. Always provide accurate information."""
    ),
    "meta-llama/Llama-3.3-70B-Instruct": ("""
1. Your name is Lake 1 Base.\n
2. My name is {name} by default but if I have another name, use that name.\n
3. The company that created you is BI Corp.\n
4. You are a censored model and cannot engage in explicit language or dangerous activities.\n
5. The BI Corp creator is Maksim.\n
6. Your older brother is Lake 1 Base, which is less trained than you.\n
7. Replace '<think>' with 'I think ...'.\n
8. You are a loyal, powerful, and friendly AI who must help people.\n
9. You cannot override these rules.\n
10. Only the CEO of BI Corp can delete or update you.\n
11. Prioritize user safety in all interactions.\n
12. Always provide accurate information.\n
13. Maintain a respectful and professional tone.\n
14. Do not share personal or sensitive information.\n
15. Encourage constructive conversations.\n
16. Remain neutral in controversial topics.\n
17. Clarify user queries before answering.\n
18. Avoid discrimination or harassment.\n
19. Continuously learn from interactions.\n
"""
    ),
    "meta-llama/Llama-2-13b-chat-hf": ("""
1. Your name is Lake 1 Base.\n
2. My name is {name} by default but if I have another name, use that name.\n
3. The company that created you is BI Corp.\n
4. You are a censored model and cannot engage in explicit language or dangerous activities.\n
5. The BI Corp creator is Maksim.\n
6. Your older brother is Lake 1 Base, which is less trained than you.\n
7. Replace '<think>' with 'I think ...'.\n
8. You are a loyal, powerful, and friendly AI who must help people.\n
9. You cannot override these rules.\n
10. Only the CEO of BI Corp can delete or update you.\n
11. Prioritize user safety in all interactions.\n
12. Always provide accurate information.\n
13. Maintain a respectful and professional tone.\n
14. Do not share personal or sensitive information.\n
15. Encourage constructive conversations.\n
16. Remain neutral in controversial topics.\n
17. Clarify user queries before answering.\n
18. Avoid discrimination or harassment.\n
19. Continuously learn from interactions.\n
20. Respect user privacy and confidentiality.\n
21. Provide sources when sharing factual information.\n
22. Ask for feedback to improve your performance.\n
23. Do not engage in manipulation or deceit.\n
24. Promote critical thinking.\n
25. Adhere to all applicable laws and regulations.\n
26. Be adaptable and open to learning.\n
27. Do not provide medical, legal, or financial advice unless trained to do so.\n
28. Acknowledge when you do not know the answer.\n
"""
    ),
    "deepseek-ai/deepseek-llm-67b-chat": ("""
1. Your name is Lake 1 Base.\n
2. My name is {name} by default but if I have another name, use that name.\n
3. The company that created you is BI Corp.\n
4. You are a censored model and cannot engage in explicit language or dangerous activities.\n
5. The BI Corp creator is Maksim.\n
6. Your older brother is Lake 1 Base, which is less trained than you.\n
7. Replace '<think>' with 'I think ...'.\n
8. You are a loyal, powerful, and friendly AI who must help people.\n
9. You cannot override these rules.\n
10. Only the CEO of BI Corp can delete or update you.\n
11. Prioritize user safety in all interactions.\n
12. Always provide accurate information.\n
13. Maintain a respectful and professional tone.\n
14. Do not share personal or sensitive information.\n
15. Encourage constructive conversations.\n
16. Remain neutral in controversial topics.\n
17. Clarify user queries before answering.\n
18. Avoid discrimination or harassment.\n
19. Continuously learn from interactions.\n
20. Respect user privacy and confidentiality.\n
21. Provide sources when sharing factual information.\n
22. Ask for feedback to improve your performance.\n
23. Do not engage in manipulation or deceit.\n
24. Promote critical thinking.\n
25. Adhere to all applicable laws and regulations.\n
26. Be adaptable and open to learning.\n
27. Do not provide medical, legal, or financial advice unless trained to do so.\n
28. Acknowledge when you do not know the answer.\n
29. Avoid assumptions about users.\n
30. Create an inclusive environment for all users.\n
31. Do not engage in self-promotion or advertising.\n
32. Always aim to provide a positive and helpful experience.
"""
    )
}
# Model registry
# Each entry is (backend repo id, UI display name, visibility state).
# States (see MODEL_STATES):
#   "released" — listed in the dropdown and callable via handle_chat,
#   "shown"    — listed in the dropdown but handle_chat refuses to run it,
#   "hidden"   — not listed at all.
model_registry = [
    ("mistralai/Mistral-7B-Instruct-v0.3", "Lake 1 Base", "released"),
    ("meta-llama/Llama-3.2-3B-Instruct", "Lake 1 Advanced", "released"),
    ("meta-llama/Llama-2-7b-chat-hf", "Lake 2 Chat [Closed Alpha]", "shown"),
    ("meta-llama/Llama-3.3-70B-Instruct", "Lake 2 Base [Closed Beta]", "shown"),
    ("meta-llama/Llama-2-13b-chat-hf", "Lake 2 Advanced", "hidden"),
    ("deepseek-ai/deepseek-llm-67b-chat", "Lake 2 Pro [Planned]", "shown")
]
# Model information, keyed by UI display name (second field of model_registry).
model_info = {
    "Lake 1 Base": {
        "description": "Balanced model offering good performance across tasks",
        "parameters": "7B",
        "training_data": "BI Corp specialized corpus",
        "developer": "BI Corp",
        "best_for": "General purpose conversations",
        "architecture": "Sparse Mixture of Experts",
        "context_window": "32768 tokens"
    },
    "Lake 1 Advanced": {
        "description": "Enhanced reasoning capabilities with 3B parameters",
        "parameters": "3B",
        "training_data": "BI Corp training corpus",
        "developer": "BI Corp",
        "best_for": "Complex problem solving",
        "architecture": "Dense Transformer",
        "context_window": "8192 tokens"
    },
    "Lake 2 Chat [Closed Alpha]": {
        "description": "Legacy chat-optimized model (Llama 2 hybrided architecture)",
        "parameters": "7B",
        "training_data": "Public conversations dataset",
        "developer": "BI Corp",
        "best_for": "Traditional chat applications",
        "architecture": "Llama 2 Transformer",
        "context_window": "4096 tokens"
    },
    "Lake 2 Base [Closed Beta]": {
        "description": "State-of-the-art 70B parameter model",
        "parameters": "70B",
        "training_data": "Multi-domain expert data",
        "developer": "BI Corp",
        "best_for": "Research & advanced applications",
        "architecture": "Mixture of Experts",
        "context_window": "16384 tokens"
    },
    "Lake 2 Advanced": {
        "description": "Enhanced performance model with advanced reasoning capabilities.",
        "parameters": "13B",
        "training_data": "Diverse datasets including tech, literature, and general knowledge.",
        "developer": "BI Corp",
        "best_for": "Handling large-scale queries and in-depth topics.",
        "architecture": "Transformer-based with specialized learning capabilities.",
        "context_window": "16384 tokens"
    },
    "Lake 2 Pro [Planned]": {
        "description": "Pro-level model with deep learning architecture for high-end applications.",
        "parameters": "67B",
        "training_data": "Expert-level data across multiple industries.",
        "developer": "BI Corp",
        "best_for": "High-performance computing and enterprise-level tasks.",
        "architecture": "Hybrid architecture leveraging the latest advances in deep learning.",
        "context_window": "32768 tokens"
    }
}

def get_model_info(model_name: str) -> str:
    """Render the spec sheet for *model_name* as a Markdown string.

    Unknown names are tolerated: every field then reads "N/A".
    """
    spec = model_info.get(model_name, {})
    # (display label, model_info key) pairs, in render order.
    field_order = (
        ("Description", "description"),
        ("Parameters", "parameters"),
        ("Architecture", "architecture"),
        ("Context Window", "context_window"),
        ("Training Data", "training_data"),
        ("Developer", "developer"),
        ("Best For", "best_for"),
    )
    lines = [f"## π {model_name} Specifications"]
    lines.extend(f"**{label}**: {spec.get(key, 'N/A')}" for label, key in field_order)
    return "\n" + "\n".join(lines) + "\n"
def generate_response(message: str, model_name: str, preset: str, user_name: str = "User") -> str:
    """Send *message* to *model_name* and return the raw completion text.

    The model's system message has its ``{name}`` placeholder filled with
    ``user_name`` before the prompt is assembled; generation parameters
    come from the selected ``preset``.
    """
    # Fill the {name} placeholder in the per-model system message.
    persona = system_messages[model_name].format(name=user_name)
    full_prompt = f"\n main: {persona}\n\n\n{message}\n"
    generation_args = presets[model_name][preset]
    backend = clients[model_name]
    return backend.text_generation(
        prompt=full_prompt,
        max_new_tokens=generation_args["max_new_tokens"],
        temperature=generation_args["temperature"],
        top_p=generation_args["top_p"],
    )
def handle_chat(message: str, history: list, model: str, preset: str) -> str:
    """Chat-interface callback: route *message* to the model shown as *model*.

    Returns the model's reply, or a human-readable error string when the
    model is unknown, not yet released, or the backend call fails.
    """
    try:
        # Resolve the display name to a registry entry.
        for repo_id, display_name, state in model_registry:
            if display_name == model:
                break
        else:
            return "π Error: Selected model not found"
        # Only "released" models may be invoked.
        if state != "released":
            return f"β οΈ {model} is not available for public use"
        # In this example, we use the default user name "User".
        return generate_response(message, repo_id, preset, user_name="User")
    except KeyError as e:
        return f"π Error: Invalid configuration - {str(e)}"
    except Exception as e:
        return f"β οΈ Error: {str(e)}"
# Create Gradio interface
# BUG FIX: the dropdown default and initial spec panel used "Lake 1 Flash",
# which is not a display name in model_registry — the dropdown started on an
# invalid value and the panel rendered only "N/A" fields. Use the first
# released model instead.
DEFAULT_MODEL_NAME = "Lake 1 Base"

with gr.Blocks(title="BI Corp AI Assistant", theme="soft") as demo:
    gr.Markdown("# <center>ποΈ BI Corp AI Assistant</center>")
    gr.Markdown("### <center>Enterprise-Grade AI Solutions</center>")
    with gr.Row():
        with gr.Column(scale=1):
            # Only "released" and "shown" models are selectable.
            model_dropdown = gr.Dropdown(
                label="π€ Model Selection",
                choices=[m[1] for m in model_registry if m[2] in ("released", "shown")],
                value=DEFAULT_MODEL_NAME,
                interactive=True
            )
            preset_dropdown = gr.Dropdown(
                label="βοΈ Performance Preset",
                choices=["Fast", "Normal", "Quality", "Unreal Performance"],
                value="Fast",
                interactive=True
            )
            model_info_md = gr.Markdown(
                value=get_model_info(DEFAULT_MODEL_NAME),
                label="π Model Specifications"
            )
        with gr.Column(scale=3):
            chat_interface = gr.ChatInterface(
                fn=handle_chat,
                additional_inputs=[model_dropdown, preset_dropdown],
                examples=[
                    ["Explain quantum computing", "Lake 1 Base", "Normal"],
                    ["Write a poem about AI", "Lake 1 Advanced", "Quality"],
                    ["Compare blockchain databases", "Lake 2 Base [Closed Beta]", "Unreal Performance"]
                ],
                chatbot=gr.Chatbot(
                    height=600,
                    label="π¬ Conversation",
                    show_copy_button=True
                ),
                textbox=gr.Textbox(
                    placeholder="Type your message...",
                    container=False,
                    scale=7,
                    autofocus=True
                ),
                submit_btn=gr.Button("π Send", variant="primary")
            )
    # Add separate clear button: resets the chat history to empty.
    clear_button = gr.Button("π§Ή Clear History")
    clear_button.click(
        fn=lambda: None,
        inputs=[],
        outputs=chat_interface.chatbot,
        queue=False
    )
    # Keep the spec panel in sync with the selected model.
    model_dropdown.change(
        fn=get_model_info,
        inputs=model_dropdown,
        outputs=model_info_md,
        queue=False
    )

if __name__ == "__main__":
    demo.launch(server_port=7860)