Niansuh committed on
Commit
9ca9231
·
verified ·
1 Parent(s): e26a971

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +10 -15
api/utils.py CHANGED
@@ -64,22 +64,19 @@ async def process_streaming_response(request: ChatRequest):
64
  agent_mode = AGENT_MODE.get(request.model, {})
65
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
66
 
67
- # Log only necessary information
68
  logger.info(
69
- f"Received streaming request for model: '{request.model}', "
70
- f"stream: {request.stream}, temperature: {request.temperature}, "
71
- f"top_p: {request.top_p}, max_tokens: {request.max_tokens}, "
72
- f"number_of_messages: {len(request.messages)}"
73
  )
74
- logger.info(f"Using agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}")
75
 
76
  json_data = {
77
  "messages": [message_to_dict(msg) for msg in request.messages],
78
  "previewToken": None,
79
  "userId": None,
80
  "codeModelMode": True,
81
- "agentMode": agent_mode, # Use agentMode
82
- "trendingAgentMode": trending_agent_mode, # Use trendingAgentMode
83
  "isMicMode": False,
84
  "userSystemPrompt": None,
85
  "maxTokens": request.max_tokens,
@@ -134,21 +131,19 @@ async def process_non_streaming_response(request: ChatRequest):
134
  agent_mode = AGENT_MODE.get(request.model, {})
135
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
136
 
137
- # Log only necessary information
138
  logger.info(
139
- f"Received non-streaming request for model: '{request.model}', "
140
- f"max_tokens: {request.max_tokens}, temperature: {request.temperature}, "
141
- f"top_p: {request.top_p}, number_of_messages: {len(request.messages)}"
142
  )
143
- logger.info(f"Using agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}")
144
 
145
  json_data = {
146
  "messages": [message_to_dict(msg) for msg in request.messages],
147
  "previewToken": None,
148
  "userId": None,
149
  "codeModelMode": True,
150
- "agentMode": agent_mode, # Use agentMode
151
- "trendingAgentMode": trending_agent_mode, # Use trendingAgentMode
152
  "isMicMode": False,
153
  "userSystemPrompt": None,
154
  "maxTokens": request.max_tokens,
 
64
  agent_mode = AGENT_MODE.get(request.model, {})
65
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
66
 
67
+ # Log reduced information
68
  logger.info(
69
+ f"Streaming request for model: '{request.model}', "
70
+ f"using agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
 
 
71
  )
 
72
 
73
  json_data = {
74
  "messages": [message_to_dict(msg) for msg in request.messages],
75
  "previewToken": None,
76
  "userId": None,
77
  "codeModelMode": True,
78
+ "agentMode": agent_mode,
79
+ "trendingAgentMode": trending_agent_mode,
80
  "isMicMode": False,
81
  "userSystemPrompt": None,
82
  "maxTokens": request.max_tokens,
 
131
  agent_mode = AGENT_MODE.get(request.model, {})
132
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
133
 
134
+ # Log reduced information
135
  logger.info(
136
+ f"Non-streaming request for model: '{request.model}', "
137
+ f"using agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
 
138
  )
 
139
 
140
  json_data = {
141
  "messages": [message_to_dict(msg) for msg in request.messages],
142
  "previewToken": None,
143
  "userId": None,
144
  "codeModelMode": True,
145
+ "agentMode": agent_mode,
146
+ "trendingAgentMode": trending_agent_mode,
147
  "isMicMode": False,
148
  "userSystemPrompt": None,
149
  "maxTokens": request.max_tokens,