pvanand committed on
Commit
50f8031
·
verified ·
1 Parent(s): 9c95ead

Delete document_generator.py

Browse files
Files changed (1) hide show
  1. document_generator.py +0 -626
document_generator.py DELETED
@@ -1,626 +0,0 @@
1
# File: prompts.py
# Prompt templates for the document generator. The pipeline runs in two
# stages: (1) request a JSON outline of the whole document, (2) request the
# markdown content of each section/subsection. The *_TEMPLATE_* variants
# ask for placeholder/example content instead of real content.

# Stage 1 (content mode): system prompt requesting the outline as JSON,
# wrapped in <output> tags so it can be extracted with a regex downstream.
DOCUMENT_OUTLINE_PROMPT_SYSTEM = """You are a document generator. Provide the outline of the document requested in <prompt></prompt> in JSON format.
Include sections and subsections if required. Use the "Content" field to provide a specific prompt or instruction for generating content for that particular section or subsection.
make sure the Sections follow a logical flow and each prompt's content does not overlap with other sections.
OUTPUT IN FOLLOWING JSON FORMAT enclosed in <output> tags
<output>
{
"Document": {
"Title": "Document Title",
"Author": "Author Name",
"Date": "YYYY-MM-DD",
"Version": "1.0",
"Sections": [
{
"SectionNumber": "1",
"Title": "Section Title",
"Content": "Specific prompt or instruction for generating content for this section",
"Subsections": [
{
"SectionNumber": "1.1",
"Title": "Subsection Title",
"Content": "Specific prompt or instruction for generating content for this subsection"
}
]
}
]
}
}
</output>"""

# Stage 1 user prompt: the raw user query wrapped in <prompt> tags.
DOCUMENT_OUTLINE_PROMPT_USER = """<prompt>{query}</prompt>"""

# Stage 2 (content mode): system prompt used while generating one section.
# {overall_objective} is the original query; {document_layout} is the full
# outline JSON so the model can avoid overlapping other sections.
DOCUMENT_SECTION_PROMPT_SYSTEM = """You are a document generator, You need to output only the content requested in the section in the prompt.
FORMAT YOUR OUTPUT AS MARKDOWN ENCLOSED IN <response></response> tags
<overall_objective>{overall_objective}</overall_objective>
<document_layout>{document_layout}</document_layout>"""

# Stage 2 user prompt: names the section and passes its content instruction.
DOCUMENT_SECTION_PROMPT_USER = """<prompt>Output the content for the section "{section_or_subsection_title}" formatted as markdown. Follow this instruction: {content_instruction}</prompt>"""

##########################################

# Stage 1 (template mode): same outline request, but each per-section
# prompt should ask for placeholder/example content.
# NOTE(review): "necessory" is a typo inside the prompt text; left as-is
# because this string is sent to the model at runtime.
DOCUMENT_TEMPLATE_OUTLINE_PROMPT_SYSTEM = """You are a document template generator. Provide the outline of the document requested in <prompt></prompt> in JSON format.
Include sections and subsections if required. Use the "Content" field to provide a specific prompt or instruction for generating template with placeholder text /example content for that particular section or subsection. Specify in each prompt to output as a template and use placeholder text/ tables as necessory.
make sure the Sections follow a logical flow and each prompt's content does not overlap with other sections.
OUTPUT IN FOLLOWING JSON FORMAT enclosed in <output> tags
<output>
{
"Document": {
"Title": "Document Title",
"Author": "Author Name",
"Date": "YYYY-MM-DD",
"Version": "1.0",
"Sections": [
{
"SectionNumber": "1",
"Title": "Section Title",
"Content": "Specific prompt or instruction for generating template for this section",
"Subsections": [
{
"SectionNumber": "1.1",
"Title": "Subsection Title",
"Content": "Specific prompt or instruction for generating template for this subsection"
}
]
}
]
}
}
</output>"""

# Stage 1 (template mode) user prompt.
DOCUMENT_TEMPLATE_PROMPT_USER = """<prompt>{query}</prompt>"""

# Stage 2 (template mode): per-section generation with placeholders.
DOCUMENT_TEMPLATE_SECTION_PROMPT_SYSTEM = """You are a document template generator,You need to output only the content requested in the section in the prompt, Use placeholder text/examples/tables wherever required.
FORMAT YOUR OUTPUT AS A TEMPLATE ENCLOSED IN <response></response> tags
<overall_objective>{overall_objective}</overall_objective>
<document_layout>{document_layout}</document_layout>"""

# Stage 2 (template mode) user prompt.
DOCUMENT_TEMPLATE_SECTION_PROMPT_USER = """<prompt>Output the content for the section "{section_or_subsection_title}" formatted as markdown. Follow this instruction: {content_instruction}</prompt>"""
80
-
81
-
82
- # File: llm_observability.py
83
-
84
- import sqlite3
85
- import json
86
- from datetime import datetime
87
- from typing import Dict, Any, List, Optional
88
-
89
class LLMObservabilityManager:
    """Persists one row per LLM call into a local SQLite database."""

    def __init__(self, db_path: str = "llm_observability.db"):
        self.db_path = db_path
        self.create_table()

    def create_table(self):
        """Create the llm_observations table if it does not exist yet."""
        ddl = '''
            CREATE TABLE IF NOT EXISTS llm_observations (
                id TEXT PRIMARY KEY,
                conversation_id TEXT,
                created_at DATETIME,
                status TEXT,
                request TEXT,
                response TEXT,
                model TEXT,
                total_tokens INTEGER,
                prompt_tokens INTEGER,
                completion_tokens INTEGER,
                latency FLOAT,
                user TEXT
            )
        '''
        with sqlite3.connect(self.db_path) as conn:
            conn.cursor().execute(ddl)

    def insert_observation(self, response: Dict[str, Any], conversation_id: str, status: str, request: str, latency: float, user: str):
        """Record one chat-completion call.

        `response` is an OpenAI-style completion dict (keys: id, created,
        choices, model, usage). Only the first choice's message is stored.
        """
        row = (
            response['id'],
            conversation_id,
            datetime.fromtimestamp(response['created']),
            status,
            request,
            json.dumps(response['choices'][0]['message']),
            response['model'],
            response['usage']['total_tokens'],
            response['usage']['prompt_tokens'],
            response['usage']['completion_tokens'],
            latency,
            user,
        )
        with sqlite3.connect(self.db_path) as conn:
            conn.cursor().execute(
                '''
                INSERT INTO llm_observations
                (id, conversation_id, created_at, status, request, response, model, total_tokens, prompt_tokens, completion_tokens, latency, user)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                ''',
                row,
            )

    def get_observations(self, conversation_id: Optional[str] = None) -> List[Dict[str, Any]]:
        """Return stored rows as dicts, oldest first, optionally filtered
        to a single conversation."""
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.cursor()
            if conversation_id:
                cursor.execute(
                    'SELECT * FROM llm_observations WHERE conversation_id = ? ORDER BY created_at',
                    (conversation_id,),
                )
            else:
                cursor.execute('SELECT * FROM llm_observations ORDER BY created_at')
            names = [col[0] for col in cursor.description]
            return [dict(zip(names, fetched)) for fetched in cursor.fetchall()]

    def get_all_observations(self) -> List[Dict[str, Any]]:
        """Convenience wrapper: every row across every conversation."""
        return self.get_observations()
152
-
153
-
154
- # File: app.py
155
- import os
156
- import json
157
- import re
158
- import asyncio
159
- import time
160
- from typing import List, Dict, Optional, Any, Callable, Union
161
- from openai import OpenAI
162
- import logging
163
- import functools
164
- from fastapi import APIRouter, HTTPException, Request
165
- from fastapi.responses import StreamingResponse
166
- from pydantic import BaseModel
167
- from fastapi_cache.decorator import cache
168
- import psycopg2
169
- from datetime import datetime
170
-
171
-
172
# Module-wide logging setup: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def log_execution(func: Callable) -> Callable:
    """Decorator logging entry, success, and failure of *func*.

    Exceptions are logged and re-raised unchanged.
    """
    @functools.wraps(func)
    def logged(*args: Any, **kwargs: Any) -> Any:
        logger.info(f"Executing {func.__name__}")
        try:
            outcome = func(*args, **kwargs)
            logger.info(f"{func.__name__} completed successfully")
            return outcome
        except Exception as e:
            logger.error(f"Error in {func.__name__}: {e}")
            raise
    return logged
187
-
188
# aiclient.py

class AIClient:
    """Thin wrapper around the OpenRouter chat-completions API that also
    records every call in the local observability database.

    The text and vision entry points previously duplicated ~30 lines of
    identical request/logging code; that flow now lives in `_chat`.
    """

    def __init__(self):
        # OpenRouter exposes an OpenAI-compatible API; the key prefix
        # "sk-or-v1-" is concatenated with the OPENROUTER_API_KEY secret.
        self.client = OpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key="sk-or-v1-" + os.environ['OPENROUTER_API_KEY']
        )
        self.observability_manager = LLMObservabilityManager()

    def _chat(
        self,
        messages,
        model: str,
        max_tokens: int,
        conversation_id: str,
        user: str,
    ) -> Optional[str]:
        """Shared implementation for text and vision requests: call the
        API synchronously, record the observation, return the reply text.

        Returns None when `messages` is empty.
        """
        if not messages:
            return None

        start_time = time.time()
        response = self.client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            stream=False
        )
        latency = time.time() - start_time

        # Persist request/response metadata (tokens, latency, user) for
        # later inspection via the observations endpoints.
        self.observability_manager.insert_observation(
            response=response.dict(),
            conversation_id=conversation_id or "default",
            status="success",
            request=json.dumps(messages),
            latency=latency,
            user=user
        )

        return response.choices[0].message.content

    @log_execution
    def generate_response(
        self,
        messages: List[Dict[str, str]],
        model: str = "openai/gpt-4o-mini",
        max_tokens: int = 32000,
        conversation_id: str = None,
        user: str = "anonymous"
    ) -> Optional[str]:
        """Generate a plain-text chat completion.

        Returns the assistant message content, or None for empty input.
        """
        return self._chat(messages, model, max_tokens, conversation_id, user)

    @log_execution
    def generate_vision_response(
        self,
        messages: List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]],
        model: str = "google/gemini-flash-1.5-8b",
        max_tokens: int = 32000,
        conversation_id: str = None,
        user: str = "anonymous"
    ) -> Optional[str]:
        """Generate a completion for multimodal (text + image) messages.

        Identical flow to generate_response; only the default model and
        the accepted message shape differ.
        """
        return self._chat(messages, model, max_tokens, conversation_id, user)
265
-
266
-
267
class DatabaseManager:
    """Manages database operations."""

    def __init__(self):
        # Supabase-hosted Postgres; credentials come from the environment.
        self.db_params = {
            "dbname": "postgres",
            "user": os.environ['SUPABASE_USER'],
            "password": os.environ['SUPABASE_PASSWORD'],
            "host": "aws-0-us-west-1.pooler.supabase.com",
            "port": "5432",
        }

    @log_execution
    def update_database(self, user_id: str, user_query: str, response: str) -> None:
        """Append one (user_id, query, response) row to ai_document_generator."""
        insert_query = """
            INSERT INTO ai_document_generator (user_id, user_query, response)
            VALUES (%s, %s, %s);
        """
        with psycopg2.connect(**self.db_params) as conn:
            with conn.cursor() as cur:
                cur.execute(insert_query, (user_id, user_query, response))
288
-
289
class DocumentGenerator:
    """Drives the two-stage generation: outline first, then per-section
    content, keeping a running chat history in `content_messages`."""

    def __init__(self, ai_client: AIClient):
        self.ai_client = ai_client
        self.document_outline = None   # parsed outline dict, set by generate_document_outline or the caller
        self.content_messages = []     # chat history shared across all section generations

    @staticmethod
    def extract_between_tags(text: str, tag: str) -> str:
        """Return the text enclosed in <tag>...</tag>, stripped; "" if absent."""
        pattern = f"<{tag}>(.*?)</{tag}>"
        match = re.search(pattern, text, re.DOTALL)
        return match.group(1).strip() if match else ""

    @staticmethod
    def remove_duplicate_title(content: str, title: str, section_number: str) -> str:
        """Strip a leading repeated heading the model may have emitted.

        Pattern order matters: the most specific forms (heading marker +
        number + title) are removed before the bare title so partial
        matches do not leave fragments behind.
        """
        patterns = [
            rf"^#+\s*{re.escape(section_number)}(?:\s+|\s*:\s*|\.\s*){re.escape(title)}",
            rf"^#+\s*{re.escape(title)}",
            rf"^{re.escape(section_number)}(?:\s+|\s*:\s*|\.\s*){re.escape(title)}",
            rf"^{re.escape(title)}",
        ]

        for pattern in patterns:
            content = re.sub(pattern, "", content, flags=re.MULTILINE | re.IGNORECASE)

        return content.lstrip()

    @log_execution
    def generate_document_outline(self, query: str, template: bool = False, max_retries: int = 3) -> Optional[Dict]:
        """Ask the model for a JSON outline; retry on JSON parse failures.

        Returns the parsed outline dict, or None after `max_retries`
        failed parse attempts.
        """
        messages = [
            {"role": "system", "content": DOCUMENT_OUTLINE_PROMPT_SYSTEM if not template else DOCUMENT_TEMPLATE_OUTLINE_PROMPT_SYSTEM},
            {"role": "user", "content": DOCUMENT_OUTLINE_PROMPT_USER.format(query=query) if not template else DOCUMENT_TEMPLATE_PROMPT_USER.format(query=query)}
        ]

        for attempt in range(max_retries):
            # NOTE(review): generate_response returns None only for empty
            # input, which cannot happen here since `messages` is non-empty.
            outline_response = self.ai_client.generate_response(messages, model="openai/gpt-4o")
            outline_json_text = self.extract_between_tags(outline_response, "output")

            try:
                self.document_outline = json.loads(outline_json_text)
                return self.document_outline
            except json.JSONDecodeError as e:
                if attempt < max_retries - 1:
                    logger.warning(f"Failed to parse JSON (attempt {attempt + 1}): {e}")
                    logger.info("Retrying...")
                else:
                    logger.error(f"Failed to parse JSON after {max_retries} attempts: {e}")
                    return None

    @log_execution
    def generate_content(self, title: str, content_instruction: str, section_number: str, template: bool = False) -> str:
        """Generate one section's markdown and append the exchange to the
        shared chat history (user prompt first, raw assistant reply after).
        """
        SECTION_PROMPT_USER = DOCUMENT_SECTION_PROMPT_USER if not template else DOCUMENT_TEMPLATE_SECTION_PROMPT_USER
        self.content_messages.append({
            "role": "user",
            "content": SECTION_PROMPT_USER.format(
                section_or_subsection_title=title,
                content_instruction=content_instruction
            )
        })
        section_response = self.ai_client.generate_response(self.content_messages)
        content = self.extract_between_tags(section_response, "response")
        content = self.remove_duplicate_title(content, title, section_number)
        # The raw (untrimmed) assistant reply is stored so the model sees
        # its own <response> tags in subsequent turns.
        self.content_messages.append({
            "role": "assistant",
            "content": section_response
        })
        return content
355
-
356
class MarkdownConverter:
    """Renders the completed outline dict into styled HTML-in-markdown."""

    @staticmethod
    def slugify(text: str) -> str:
        """Lowercase `text` and collapse non-word runs into hyphens."""
        return re.sub(r'\W+', '-', text.lower())

    @classmethod
    def generate_toc(cls, sections: List[Dict]) -> str:
        """Build a linked table of contents for the given sections."""
        parts = [
            "<div style='page-break-before: always;'></div>\n\n",
            "<h2 style='color: #2c3e50; text-align: center;'>Table of Contents</h2>\n\n",
            "<nav style='background-color: #f8f9fa; padding: 20px; border-radius: 5px; line-height: 1.6;'>\n\n",
        ]
        for entry in sections:
            number, heading = entry['SectionNumber'], entry['Title']
            parts.append(
                f"<p><a href='#{cls.slugify(heading)}' style='color: #3498db; text-decoration: none;'>{number}. {heading}</a></p>\n\n"
            )
            for sub in entry.get('Subsections', []):
                sub_number, sub_heading = sub['SectionNumber'], sub['Title']
                parts.append(
                    f"<p style='margin-left: 20px;'><a href='#{cls.slugify(sub_heading)}' style='color: #2980b9; text-decoration: none;'>{sub_number} {sub_heading}</a></p>\n\n"
                )
        parts.append("</nav>\n\n")
        return "".join(parts)

    @classmethod
    def convert_to_markdown(cls, document: Dict) -> str:
        """Render the title page, TOC, and every section into one string."""
        parts = [
            "<div style='text-align: center; padding-top: 33vh;'>\n\n",
            f"<h1 style='color: #2c3e50; border-bottom: 2px solid #3498db; padding-bottom: 10px; display: inline-block;'>{document['Title']}</h1>\n\n",
            f"<p style='color: #7f8c8d;'><em>By {document['Author']}</em></p>\n\n",
            f"<p style='color: #95a5a6;'>Version {document['Version']} | {document['Date']}</p>\n\n",
            "</div>\n\n",
            cls.generate_toc(document['Sections']),
            "<div style='max-width: 800px; margin: 0 auto; font-family: \"Segoe UI\", Arial, sans-serif; line-height: 1.6;'>\n\n",
        ]

        for entry in document['Sections']:
            number, heading = entry['SectionNumber'], entry['Title']
            parts.append("<div style='page-break-before: always;'></div>\n\n")
            parts.append(
                f"<h2 id='{cls.slugify(heading)}' style='color: #2c3e50; border-bottom: 1px solid #bdc3c7; padding-bottom: 5px;'>{number}. {heading}</h2>\n\n"
            )
            parts.append(f"<div style='color: #34495e; margin-bottom: 20px;'>\n\n{entry['Content']}\n\n</div>\n\n")

            for sub in entry.get('Subsections', []):
                sub_number, sub_heading = sub['SectionNumber'], sub['Title']
                parts.append(
                    f"<h3 id='{cls.slugify(sub_heading)}' style='color: #34495e;'>{sub_number} {sub_heading}</h3>\n\n"
                )
                parts.append(f"<div style='color: #34495e; margin-bottom: 20px;'>\n\n{sub['Content']}\n\n</div>\n\n")

        parts.append("</div>")
        return "".join(parts)
406
-
407
# FastAPI router for all document-generator endpoints.
router = APIRouter()

class DocumentRequest(BaseModel):
    """Request body for outline generation."""
    # query: what document to generate; template: placeholder mode if True.
    query: str
    template: bool = False

class JsonDocumentResponse(BaseModel):
    """Response wrapper carrying the outline JSON."""
    json_document: Dict

class MarkdownDocumentRequest(BaseModel):
    """Request body for streaming generation: a previously generated
    outline plus the original query."""
    json_document: Dict
    query: str
    template: bool = False
420
-
421
# Byte marker separating JSON messages in the streaming response body.
MESSAGE_DELIMITER = b"\n---DELIMITER---\n"

def yield_message(message):
    """Serialize `message` as UTF-8 JSON and append the stream delimiter."""
    payload = json.dumps(message, ensure_ascii=False).encode('utf-8')
    return payload + MESSAGE_DELIMITER
426
-
427
async def generate_document_stream(document_generator: DocumentGenerator, document_outline: Dict, query: str, template: bool = False):
    """Async generator that fills in every section of `document_outline`.

    Yields one delimited JSON `document_section` message per (sub)section
    as it is generated, then a final `complete_document` message, and
    records the finished markdown in Postgres.

    Fixes: uses the module `logger` instead of the root `logging` calls
    (consistent with the rest of the file) and drops a stray `;`.
    """
    document_generator.document_outline = document_outline
    db_manager = DatabaseManager()
    overall_objective = query
    document_layout = json.dumps(document_generator.document_outline, indent=2)

    # Seed the shared chat history with a system prompt carrying the
    # objective and the full layout so each section stays consistent.
    SECTION_PROMPT_SYSTEM = DOCUMENT_SECTION_PROMPT_SYSTEM if not template else DOCUMENT_TEMPLATE_SECTION_PROMPT_SYSTEM
    document_generator.content_messages = [
        {
            "role": "system",
            "content": SECTION_PROMPT_SYSTEM.format(
                overall_objective=overall_objective,
                document_layout=document_layout
            )
        }
    ]

    for section in document_generator.document_outline["Document"].get("Sections", []):
        section_title = section.get("Title", "")
        section_number = section.get("SectionNumber", "")
        content_instruction = section.get("Content", "")
        logger.info(f"Generating content for section: {section_title}")
        content = document_generator.generate_content(section_title, content_instruction, section_number, template)
        # Replace the instruction with generated content in-place so the
        # outline doubles as the finished document.
        section["Content"] = content
        yield yield_message({
            "type": "document_section",
            "content": {
                "section_number": section_number,
                "section_title": section_title,
                "content": content
            }
        })

        for subsection in section.get("Subsections", []):
            subsection_title = subsection.get("Title", "")
            subsection_number = subsection.get("SectionNumber", "")
            subsection_content_instruction = subsection.get("Content", "")
            logger.info(f"Generating content for subsection: {subsection_title}")
            content = document_generator.generate_content(subsection_title, subsection_content_instruction, subsection_number, template)
            subsection["Content"] = content
            yield yield_message({
                "type": "document_section",
                "content": {
                    "section_number": subsection_number,
                    "section_title": subsection_title,
                    "content": content
                }
            })

    markdown_document = MarkdownConverter.convert_to_markdown(document_generator.document_outline["Document"])

    yield yield_message({
        "type": "complete_document",
        "content": {
            "markdown": markdown_document,
            "json": document_generator.document_outline
        },
    })

    # Persisted only after the complete document has been streamed, so a
    # DB failure cannot affect what the client already received.
    db_manager.update_database("elevatics", query, markdown_document)
487
-
488
-
489
# NOTE(review): expire=600*24*7 is 100,800 s (~28 h); if one week was
# intended it should be 3600*24*7 — confirm before changing.
@router.post("/generate-document/json", response_model=JsonDocumentResponse)
@cache(expire=600*24*7)
async def generate_document_outline_endpoint(request: DocumentRequest):
    """Generate a document outline as JSON (cached).

    Fix: @cache must sit *below* the route decorator (closest to the
    function) so fastapi-cache wraps the handler FastAPI registers —
    matching the order used by /test-cache; the original order made
    caching a no-op.
    """
    ai_client = AIClient()
    document_generator = DocumentGenerator(ai_client)

    try:
        json_document = document_generator.generate_document_outline(request.query, request.template)

        if json_document is None:
            raise HTTPException(status_code=500, detail="Failed to generate a valid document outline")

        return JsonDocumentResponse(json_document=json_document)
    except HTTPException:
        # Preserve the specific error above instead of re-wrapping it.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
504
-
505
@router.post("/generate-document/markdown-stream")
async def generate_markdown_document_stream_endpoint(request: MarkdownDocumentRequest):
    """Stream section-by-section markdown generation for a given outline."""
    ai_client = AIClient()
    document_generator = DocumentGenerator(ai_client)

    async def stream_generator():
        # Errors are reported in-band as a final "error" message because
        # the HTTP status can no longer change once streaming has begun.
        try:
            async for chunk in generate_document_stream(document_generator, request.json_document, request.query, request.template):
                yield chunk
        except Exception as exc:
            yield yield_message({"type": "error", "content": str(exc)})

    return StreamingResponse(stream_generator(), media_type="application/octet-stream")
521
-
522
-
523
- ## OBSERVABILITY
524
- from uuid import uuid4
525
- import csv
526
- from io import StringIO
527
-
528
class ObservationResponse(BaseModel):
    """JSON wrapper for a list of observation rows."""
    observations: List[Dict]

def create_csv_response(observations: List[Dict]) -> StreamingResponse:
    """Render observation dicts as a downloadable CSV attachment.

    Column headers come from the first row's keys; an empty list yields
    a header-only (empty) CSV.
    """
    def iter_csv(data):
        buffer = StringIO()
        writer = csv.DictWriter(buffer, fieldnames=data[0].keys() if data else [])
        writer.writeheader()
        writer.writerows(data)
        buffer.seek(0)
        yield buffer.read()

    headers = {
        'Content-Disposition': 'attachment; filename="observations.csv"'
    }
    return StreamingResponse(iter_csv(observations), media_type="text/csv", headers=headers)
545
-
546
-
547
@router.get("/last-observations/{limit}")
async def get_last_observations(limit: int = 10, format: str = "json"):
    """Return up to `limit` rows from the most recent conversation,
    as JSON (default) or CSV when format=csv."""
    observability_manager = LLMObservabilityManager()

    try:
        # Fetch everything, newest first.
        rows = observability_manager.get_observations()
        rows.sort(key=lambda row: row['created_at'], reverse=True)

        if not rows:
            # Nothing stored yet: empty payload in the requested format.
            if format.lower() == "csv":
                return create_csv_response([])
            return ObservationResponse(observations=[])

        # The newest row identifies the most recent conversation; keep
        # only its rows, capped at `limit`.
        latest_conversation = rows[0]['conversation_id']
        selected = [row for row in rows if row['conversation_id'] == latest_conversation][:limit]

        if format.lower() == "csv":
            return create_csv_response(selected)
        return ObservationResponse(observations=selected)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Failed to retrieve observations: {str(e)}")
577
-
578
-
579
-
580
-
581
- ###########################################
582
class MarkdownDocumentResponse(BaseModel):
    """Response wrapper for a pre-rendered markdown document."""
    markdown_document: str

@router.post("/generate-document-test", response_model=MarkdownDocumentResponse)
async def test_generate_document_endpoint(request: DocumentRequest):
    """Serve a canned document from disk for exercising the client flow.

    Fixes: the loaded JSON was bound to an unused variable (now loaded
    purely as a validation step — a malformed file still surfaces as a
    500 via the JSONDecodeError handler), and file encoding is now
    explicit instead of platform-dependent.
    """
    try:
        # Validate the JSON fixture; its contents are intentionally unused.
        json_path = os.path.join("output/document_generator", "ai-chatbot-prd.json")
        with open(json_path, "r", encoding="utf-8") as json_file:
            json.load(json_file)

        # The markdown fixture is what actually gets returned.
        md_path = os.path.join("output/document_generator", "ai-chatbot-prd.md")
        with open(md_path, "r", encoding="utf-8") as md_file:
            markdown_document = md_file.read()

        return MarkdownDocumentResponse(markdown_document=markdown_document)
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="Test files not found")
    except json.JSONDecodeError:
        raise HTTPException(status_code=500, detail="Error parsing JSON file")
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
605
-
606
class CacheTestResponse(BaseModel):
    """Result plus measured handler time, for observing cache behavior."""
    # result: payload string; execution_time: seconds the handler ran.
    result: str
    execution_time: float

@router.get("/test-cache/{test_id}", response_model=CacheTestResponse)
@cache(expire=60)  # Cache for 1 minute
async def test_cache(test_id: int):
    """Diagnostic endpoint: the 2-second sleep makes cache hits obvious
    (a cached response returns instantly with the original execution_time).

    Note the decorator order — @cache *below* the route decorator — which
    is required for fastapi-cache to wrap the registered handler.
    """
    start_time = time.time()

    # Simulate some time-consuming operation
    await asyncio.sleep(2)

    result = f"Test result for ID: {test_id}"

    end_time = time.time()
    execution_time = end_time - start_time

    return CacheTestResponse(
        result=result,
        execution_time=execution_time
    )