hadadrjt commited on
Commit
2ca269f
·
1 Parent(s): cd91f10

ai: Allow J.A.R.V.I.S. to get current date.

Browse files
Files changed (1) hide show
  1. src/cores/client.py +18 -2
src/cores/client.py CHANGED
@@ -12,6 +12,7 @@ import uuid
12
  from src.config import *
13
  from src.cores.server import fetch_response_stream_async
14
  from src.cores.session import ensure_stop_event, get_model_key
 
15
 
16
  async def chat_with_model_async(history, user_input, model_display, sess, custom_prompt, deep_search):
17
  """
@@ -31,6 +32,18 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom
31
  model_key = get_model_key(model_display, MODEL_MAPPING, DEFAULT_MODEL_KEY)
32
  cfg = MODEL_CONFIG.get(model_key, DEFAULT_CONFIG)
33
  msgs = []
 
 
 
 
 
 
 
 
 
 
 
 
34
  # If deep search enabled and using primary model, prepend deep search instructions and results
35
  if deep_search and model_display == MODEL_CHOICES[0]:
36
  msgs.append({"role": "system", "content": DEEP_SEARCH_INSTRUCTIONS})
@@ -57,21 +70,24 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom
57
  except Exception:
58
  # Fail silently if deep search fails
59
  pass
60
- msgs.append({"role": "system", "content": INTERNAL_AI_INSTRUCTIONS})
61
  elif model_display == MODEL_CHOICES[0]:
62
  # For primary model without deep search, use internal instructions
63
- msgs.append({"role": "system", "content": INTERNAL_AI_INSTRUCTIONS})
64
  else:
65
  # For other models, use default instructions
66
  msgs.append({"role": "system", "content": custom_prompt or SYSTEM_PROMPT_MAPPING.get(model_key, SYSTEM_PROMPT_DEFAULT)})
 
67
  # Append conversation history: all user messages first, then all non-empty assistant messages
68
  msgs.extend([{"role": "user", "content": u} for u, _ in history])
69
  msgs.extend([{"role": "assistant", "content": a} for _, a in history if a])
70
  # Append current user input
71
  msgs.append({"role": "user", "content": user_input})
 
72
  # Shuffle provider hosts and keys for load balancing and fallback
73
  candidates = [(h, k) for h in LINUX_SERVER_HOSTS for k in LINUX_SERVER_PROVIDER_KEYS]
74
  random.shuffle(candidates)
 
75
  # Try each host-key pair until a successful response is received
76
  for h, k in candidates:
77
  stream_gen = fetch_response_stream_async(h, k, model_key, msgs, cfg, sess.session_id, sess.stop_event, sess.cancel_token)
 
12
  from src.config import *
13
  from src.cores.server import fetch_response_stream_async
14
  from src.cores.session import ensure_stop_event, get_model_key
15
+ from datetime import datetime
16
 
17
  async def chat_with_model_async(history, user_input, model_display, sess, custom_prompt, deep_search):
18
  """
 
32
  model_key = get_model_key(model_display, MODEL_MAPPING, DEFAULT_MODEL_KEY)
33
  cfg = MODEL_CONFIG.get(model_key, DEFAULT_CONFIG)
34
  msgs = []
35
+
36
+ # Get current local date and time (note: %Z is empty for naive datetime.now(); use .astimezone() to populate it)
37
+ current_date = datetime.now().strftime("%A, %B %d, %Y, %I:%M %p %Z")
38
+
39
+ # Instructions
40
+ COMBINED_AI_INSTRUCTIONS = (
41
+ INTERNAL_AI_INSTRUCTIONS
42
+ + "\n\n\n"
43
+ + f"Today is: {current_date}"
44
+ + "\n\n\n"
45
+ )
46
+
47
  # If deep search enabled and using primary model, prepend deep search instructions and results
48
  if deep_search and model_display == MODEL_CHOICES[0]:
49
  msgs.append({"role": "system", "content": DEEP_SEARCH_INSTRUCTIONS})
 
70
  except Exception:
71
  # Fail silently if deep search fails
72
  pass
73
+ msgs.append({"role": "system", "content": COMBINED_AI_INSTRUCTIONS})
74
  elif model_display == MODEL_CHOICES[0]:
75
  # For primary model without deep search, use internal instructions
76
+ msgs.append({"role": "system", "content": COMBINED_AI_INSTRUCTIONS})
77
  else:
78
  # For other models, use default instructions
79
  msgs.append({"role": "system", "content": custom_prompt or SYSTEM_PROMPT_MAPPING.get(model_key, SYSTEM_PROMPT_DEFAULT)})
80
+
81
  # Append conversation history: all user messages first, then all non-empty assistant messages
82
  msgs.extend([{"role": "user", "content": u} for u, _ in history])
83
  msgs.extend([{"role": "assistant", "content": a} for _, a in history if a])
84
  # Append current user input
85
  msgs.append({"role": "user", "content": user_input})
86
+
87
  # Shuffle provider hosts and keys for load balancing and fallback
88
  candidates = [(h, k) for h in LINUX_SERVER_HOSTS for k in LINUX_SERVER_PROVIDER_KEYS]
89
  random.shuffle(candidates)
90
+
91
  # Try each host-key pair until a successful response is received
92
  for h, k in candidates:
93
  stream_gen = fetch_response_stream_async(h, k, model_key, msgs, cfg, sess.session_id, sess.stop_event, sess.cancel_token)