feat: timeout on idle only (#98)

wuyiqunLu committed

Change the timeout to 0.3 min, add a sleep on the REST API side, and observe:
<img width="929" alt="image"
src="https://github.com/landing-ai/vision-agent-ui/assets/132986242/383b5d3a-bba6-4f10-9eda-0f995800209a">

Change back to 2 min and run a normal prompt (the service is having some issues due to the introduction of LangSmith, so the image could not be generated):

<img width="1450" alt="image" src="https://github.com/landing-ai/vision-agent-ui/assets/132986242/092c37c7-4b39-4d90-871f-bf32f7f42ee8">

- app/api/vision-agent/route.ts (+42 -37)

```diff
@@ -211,14 +211,6 @@ export const POST = withLogging(
       };
 
       const processChunk = async (lines: string[]) => {
-        if (lines.length === 0) {
-          if (Date.now() - time > TIMEOUT_MILI_SECONDS) {
-            results.push(FINAL_TIMEOUT_ERROR);
-            return { done: true, reason: 'timeout' };
-          }
-        } else {
-          time = Date.now();
-        }
         buffer = lines.pop() ?? ''; // Save the last incomplete line back to the buffer
         for (let line of lines) {
           const { data: parsedMsg, error } = await parseLine(line);
```
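
This first hunk removes the old elapsed-time bookkeeping, leaving `processChunk` as a pure parser. The `buffer = lines.pop() ?? ''` line it keeps is standard line buffering for streamed text: a chunk may end mid-line, so the trailing partial segment is held back and prepended to the next chunk. A minimal sketch of that technique, with illustrative names not taken from the PR:

```ts
// Line buffering for streamed text: emit complete lines, carry the
// trailing partial line over to the next chunk. Names are illustrative.
function makeLineSplitter(onLine: (line: string) => void) {
  let buffer = '';
  return (chunkText: string): void => {
    buffer += chunkText;
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? ''; // Hold back the incomplete tail.
    for (const line of lines) {
      if (line.trim().length > 0) onLine(line); // Skip blank keep-alives.
    }
  };
}

// Usage: feed decoded chunks in arrival order.
const feed = makeLineSplitter(line => console.log('line:', line));
feed('{"a":1}\n{"b"');
feed(':2}\n'); // Completes the second line: {"b":2}
```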
```diff
@@ -287,44 +279,57 @@
         return { done: false };
       };
 
-
+      const onStreamFinish = async (
+        reason: string,
+        error?: Error | PrismaJson.StructuredError,
+      ) => {
+        const processMsgs = results.filter(
+          res => res.type !== 'final_code',
+        ) as PrismaJson.AgentResponseBodies;
+        await dbPostUpdateMessageResponse(messageId, {
+          response: processMsgs.map(res => JSON.stringify(res)).join('\n'),
+          result: results.find(
+            res => res.type === 'final_code',
+          ) as PrismaJson.FinalCodeBody,
+          responseBody: processMsgs,
+        });
+        logger.info(
+          session,
+          {
+            message: 'Streaming ended',
+            maxChunkSize,
+            reason,
+            error,
+          },
+          request,
+          error ? '__AGENT_DONE_WITH_ERROR' : '__AGENT_DONE',
+        );
+        // This is just using to trigger the onFinish event in useChat hooks
+        // but the message isn't used anywhere
+        // https://github.com/vercel/ai/blob/main/packages/ui-utils/src/call-chat-api.ts#L118
+        controller.enqueue(
+          encoder.encode('0:' + JSON.stringify('DONE') + '\n'),
+        );
+        controller.close();
+      };
+
+      let timeout = null;
       for await (const chunk of fetchResponse.body as any) {
         const data = decoder.decode(chunk);
         buffer += data;
         maxChunkSize = Math.max(data.length, maxChunkSize);
+        if (timeout) clearTimeout(timeout);
         const lines = buffer
           .split('\n')
           .filter(line => line.trim().length > 0);
         const { done, reason, error } = await processChunk(lines);
         if (done) {
-          const processMsgs = results.filter(
-            res => res.type !== 'final_code',
-          ) as PrismaJson.AgentResponseBodies;
-          await dbPostUpdateMessageResponse(messageId, {
-            response: processMsgs.map(res => JSON.stringify(res)).join('\n'),
-            result: results.find(
-              res => res.type === 'final_code',
-            ) as PrismaJson.FinalCodeBody,
-            responseBody: processMsgs,
-          });
-          logger.info(
-            session,
-            {
-              message: 'Streaming ended',
-              maxChunkSize,
-              reason,
-              error,
-            },
-            request,
-            '__AGENT_DONE',
-          );
-          // This is just using to trigger the onFinish event in useChat hooks
-          // but the message isn't used anywhere
-          // https://github.com/vercel/ai/blob/main/packages/ui-utils/src/call-chat-api.ts#L118
-          controller.enqueue(
-            encoder.encode('0:' + JSON.stringify('DONE') + '\n'),
-          );
-          controller.close();
+          onStreamFinish(reason ?? '', error);
+        } else {
+          timeout = setTimeout(() => {
+            results.push(FINAL_TIMEOUT_ERROR);
+            onStreamFinish('timeout', FINAL_TIMEOUT_ERROR.payload);
+          }, TIMEOUT_MILI_SECONDS);
         }
       }
     },
```
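
The heart of the change is a self-resetting idle timer: every incoming chunk cancels the pending timer, and unless the stream has finished, a fresh one is armed, so the timeout fires only after a full quiet window with no data at all. A minimal sketch of the pattern (the function and callback names here are illustrative, not from the PR):

```ts
// Idle-only timeout around an async chunk stream: the timer measures the
// gap between chunks, not total elapsed time. Names are illustrative.
const IDLE_TIMEOUT_MS = 2 * 60 * 1000; // 2 minutes, per the PR description

async function consumeWithIdleTimeout(
  stream: AsyncIterable<Uint8Array>,
  onData: (text: string) => { done: boolean },
  onIdleTimeout: () => void,
): Promise<void> {
  const decoder = new TextDecoder();
  let timer: ReturnType<typeof setTimeout> | null = null;

  for await (const chunk of stream) {
    // Incoming data proves the upstream is alive: cancel the pending timer.
    if (timer) clearTimeout(timer);

    const { done } = onData(decoder.decode(chunk, { stream: true }));
    if (done) return; // Normal completion; no timer left armed.

    // Re-arm: time out only if the *next* chunk never arrives.
    timer = setTimeout(onIdleTimeout, IDLE_TIMEOUT_MS);
  }
}
```

A design note visible in the diff: the finishing work moved into a single `onStreamFinish(reason, error)` helper, so the normal-completion path and the timer's timeout path persist results, log `__AGENT_DONE` or `__AGENT_DONE_WITH_ERROR`, and close the controller through the same code.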