ariansyahdedy committed
Commit f169c98 · 1 Parent(s): 5d3f538

Before chat history
app/handlers/media_handler.py ADDED
@@ -0,0 +1,15 @@
+ # media_handler.py
+ from abc import ABC, abstractmethod
+ import logging
+ from app.services.download_media import download_whatsapp_media
+
+ logger = logging.getLogger(__name__)
+
+ class MediaHandler(ABC):
+     @abstractmethod
+     async def download(self, media_id: str, access_token: str, file_path: str) -> str:
+         pass
+
+ class WhatsAppMediaHandler(MediaHandler):
+     async def download(self, media_id: str, access_token: str, file_path: str) -> str:
+         return await download_whatsapp_media(media_id, access_token, file_path)
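The abstract base class keeps the download backend swappable. As a minimal sketch (this LocalFileMediaHandler is hypothetical, not part of the commit), an offline test double only needs to implement download():

import shutil

from app.handlers.media_handler import MediaHandler

class LocalFileMediaHandler(MediaHandler):
    # Test double: copies a local fixture instead of calling the WhatsApp Graph API.
    # Here media_id is treated as the path to the fixture file.
    async def download(self, media_id: str, access_token: str, file_path: str) -> str:
        shutil.copy(media_id, file_path)
        return file_path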
app/handlers/message_handler.py ADDED
@@ -0,0 +1,89 @@
+ from typing import Dict, List, Optional
+
+ from app.services.cache import MessageCache
+ from app.services.chat_manager import ChatManager
+ from app.handlers.media_handler import MediaHandler
+ from app.services.message_parser import MessageParser
+ from app.services.download_media import download_whatsapp_media
+ from app.services.message import process_message_with_llm
+ from app.models.message_types import Message, MediaType, MediaContent
+
+ import logging
+
+ logger = logging.getLogger(__name__)
+
+
+ class MessageHandler:
+     def __init__(
+         self,
+         message_cache: MessageCache,
+         chat_manager: ChatManager,
+         media_handler: MediaHandler,
+         logger: logging.Logger
+     ):
+         self.message_cache = message_cache
+         self.chat_manager = chat_manager
+         self.media_handler = media_handler
+         self.logger = logger
+
+     async def handle(self, raw_message: dict, access_token: str) -> dict:
+         try:
+             # Parse message
+             message = MessageParser.parse(raw_message)
+
+             if self.message_cache.exists(message.id):
+                 self.logger.info(f"Duplicate message detected and skipped: {message.id}")
+                 return {"status": "duplicate", "message_id": message.id}
+
+             # Download media
+             media_paths = await self._process_media(message, access_token)
+
+             self.chat_manager.initialize_chat(message.sender_id)
+
+
+             # Process message with LLM
+             result = await process_message_with_llm(
+                 message.sender_id,
+                 message.content,
+                 self.chat_manager.get_chat_history(message.sender_id),
+                 **media_paths
+             )
+
+             # Append message to chat to keep track of conversation
+             self.chat_manager.append_message(message.sender_id, "user", message.content)
+             self.chat_manager.append_message(message.sender_id, "model", result)
+
+             self.message_cache.add(message.id)
+
+             return {"status": "success", "message_id": message.id, "result": result}
+
+         except Exception as e:
+             return {"status": "error", "message_id": raw_message.get("id"), "error": str(e)}
+
+     async def _process_media(self, message: Message, access_token: str) -> Dict[str, Optional[str]]:
+         media_paths = {
+             "image_file_path": None,
+             "doc_path": None,
+             "video_file_path": None
+         }
+
+         if not message.media:
+             return media_paths
+
+         for media_type, content in message.media.items():
+             self.logger.info(f"Processing {media_type.value}: {content.file_path}")
+             file_path = await self.media_handler.download(
+                 content.id,
+                 access_token,
+                 content.file_path
+             )
+             self.logger.info(f"{media_type.value} file_path: {file_path}")
+
+             if media_type == MediaType.IMAGE:
+                 media_paths["image_file_path"] = file_path
+             elif media_type == MediaType.DOCUMENT:
+                 media_paths["doc_path"] = file_path
+             elif media_type == MediaType.VIDEO:
+                 media_paths["video_file_path"] = file_path
+
+         return media_paths
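For context, the handler is meant to be wired up the way setup_message_handler() does in app/main.py below; a minimal driver for a text-only message might look like this (the wamid/phone values are placeholders):

import asyncio
import logging

from app.handlers.media_handler import WhatsAppMediaHandler
from app.handlers.message_handler import MessageHandler
from app.services.cache import MessageCache
from app.services.chat_manager import ChatManager

async def demo() -> None:
    handler = MessageHandler(
        message_cache=MessageCache(),
        chat_manager=ChatManager(),
        media_handler=WhatsAppMediaHandler(),
        logger=logging.getLogger(__name__),
    )
    # Text-only message in the shape MessageParser.parse() expects
    raw_message = {"id": "wamid.TEST", "from": "15551234567", "text": {"body": "hello"}}
    print(await handler.handle(raw_message, access_token="<ACCESS_TOKEN>"))

asyncio.run(demo())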
app/handlers/webhook_handler.py ADDED
@@ -0,0 +1,52 @@
+ # webhook_handler.py
+ from dataclasses import dataclass
+ from typing import List, Dict
+ import time
+ import logging
+ from fastapi import Request, status
+ from fastapi.responses import JSONResponse
+
+ logger = logging.getLogger(__name__)
+
+ @dataclass
+ class WebhookResponse:
+     request_id: str
+     results: List[Dict]
+
+ class WebhookHandler:
+     def __init__(self, message_handler):
+         self.message_handler = message_handler
+         self.logger = logging.getLogger(__name__)
+
+     async def process_webhook(self, payload: dict, access_token: str) -> WebhookResponse:
+         request_id = f"req_{int(time.time()*1000)}"
+         results = []
+
+         self.logger.info(f"Processing webhook request {payload}")
+
+         try:
+             entries = payload.get("entry", [])
+             for entry in entries:
+                 entry_id = entry.get("id")
+                 self.logger.info(f"Processing entry_id: {entry_id}")
+
+                 changes = entry.get("changes", [])
+                 for change in changes:
+                     messages = change.get("value", {}).get("messages", [])
+                     for message in messages:
+                         self.logger.info(f"Processing message: {message}")
+                         response = await self.message_handler.handle(
+                             raw_message=message,
+                             access_token=access_token
+                         )
+                         results.append(response)
+
+         except Exception as e:
+             self.logger.error(f"Error processing webhook: {str(e)}")
+             return WebhookResponse(
+                 request_id=request_id,
+                 results=[{"status": "error", "error": str(e)}]
+             )
+
+         self.logger.info(f"Webhook processing completed - Results: {len(results)}")
+         return WebhookResponse(request_id=request_id, results=results)
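process_webhook() walks the standard WhatsApp Cloud API envelope, entry → changes → value.messages. An abbreviated payload of that shape, with placeholder IDs:

payload = {
    "entry": [{
        "id": "<WABA_ID>",
        "changes": [{
            "value": {
                "messages": [
                    {"id": "wamid.TEST", "from": "15551234567", "text": {"body": "hello"}}
                ]
            }
        }]
    }]
}
# response = await webhook_handler.process_webhook(payload, access_token="<ACCESS_TOKEN>")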
app/main.py CHANGED
@@ -2,15 +2,27 @@ from fastapi import FastAPI, Request, status
  from fastapi.responses import JSONResponse
  from fastapi.responses import Response
  from fastapi.exceptions import HTTPException
- from typing import Dict
- from app.services.message import generate_reply, send_reply, process_message_with_retry
+ from slowapi import Limiter, _rate_limit_exceeded_handler
+ from slowapi.errors import RateLimitExceeded
+ from slowapi.util import get_remote_address
+ from typing import Dict, List
+ from prometheus_client import Counter, Histogram, start_http_server
+ from pydantic import BaseModel, ValidationError
+ from app.services.message import generate_reply, send_reply
  import logging
  from datetime import datetime
  import time
  from contextlib import asynccontextmanager
  from app.db.database import create_indexes, init_db
  from app.endpoints.v1 import users
- from app.services.webhook_handler import webhook, verify_webhook
+ from app.services.webhook_handler import verify_webhook
+ from app.handlers.message_handler import MessageHandler
+ from app.handlers.webhook_handler import WebhookHandler
+ from app.handlers.media_handler import WhatsAppMediaHandler
+ from app.services.cache import MessageCache
+ from app.services.chat_manager import ChatManager
+
+ from app.utils.load_env import ACCESS_TOKEN

  # Configure logging
  logging.basicConfig(
@@ -19,6 +31,24 @@ logging.basicConfig(
  )

  logger = logging.getLogger(__name__)
+
+ # Initialize handlers at startup
+ message_handler = None
+ webhook_handler = None
+
+
+ async def setup_message_handler():
+     logger = logging.getLogger(__name__)
+     message_cache = MessageCache()
+     chat_manager = ChatManager()
+     media_handler = WhatsAppMediaHandler()
+
+     return MessageHandler(
+         message_cache=message_cache,
+         chat_manager=chat_manager,
+         media_handler=media_handler,
+         logger=logger
+     )
  # Initialize FastAPI app
  @asynccontextmanager
  async def lifespan(app: FastAPI):
@@ -26,21 +56,84 @@ async def lifespan(app: FastAPI):

      try:
          await init_db()
+
          logger.info("Connected to the MongoDB database!")
+
+
+         global message_handler, webhook_handler
+         message_handler = await setup_message_handler()
+         webhook_handler = WebhookHandler(message_handler)
          # collections = app.database.list_collection_names()
          # print(f"Collections in {db_name}: {collections}")
          yield
      except Exception as e:
          logger.error(e)
-
+
+ # Initialize Limiter and Prometheus Metrics
+ limiter = Limiter(key_func=get_remote_address)
  app = FastAPI(lifespan=lifespan)
+ app.state.limiter = limiter
+ app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
+


  app.include_router(users.router, prefix="/users", tags=["Users"])

+
+ # Prometheus metrics
+ webhook_requests = Counter('webhook_requests_total', 'Total webhook requests')
+ webhook_processing_time = Histogram('webhook_processing_seconds', 'Time spent processing webhook')
+
+ # Start Prometheus metrics server on port 8002
+ # start_http_server(8002)
  # Register webhook routes
- app.post("/webhook")(webhook)
+ # app.post("/webhook")(webhook)
+ # Define Pydantic schema for request validation
+ class WebhookPayload(BaseModel):
+     entry: List[Dict]
+
+ @app.post("/webhook")
+ @limiter.limit("100/minute")
+ async def webhook(request: Request):
+     try:
+         payload = await request.json()
+
+         # validated_payload = WebhookPayload(**payload) # Validate payload
+         # logger.info(f"Validated Payload: {validated_payload}")
+
+         # Process the webhook payload here
+         # For example:
+         # results = process_webhook_entries(validated_payload.entry)
+         response = await webhook_handler.process_webhook(
+             payload=payload,
+             access_token=ACCESS_TOKEN
+         )
+
+         return JSONResponse(
+             content=response.__dict__,
+             status_code=status.HTTP_200_OK
+         )
+
+     except ValidationError as ve:
+         logger.error(f"Validation error: {ve}")
+         return JSONResponse(
+             content={"status": "error", "detail": ve.errors()},
+             status_code=status.HTTP_422_UNPROCESSABLE_ENTITY
+         )
+     except Exception as e:
+         logger.error(f"Unexpected error: {str(e)}")
+         return JSONResponse(
+             content={"status": "error", "detail": str(e)},
+             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
+         )
+
  app.get("/webhook")(verify_webhook)
+
+ # Add a route for Prometheus metrics (optional, if not using a separate Prometheus server)
+ @app.get("/metrics")
+ async def metrics():
+     from prometheus_client import generate_latest
+     return Response(content=generate_latest(), media_type="text/plain")
  # In-memory cache with timestamp cleanup
  # class MessageCache:
  #     def __init__(self, max_age_hours: int = 24):
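Note that webhook_requests and webhook_processing_time are declared but never attached to the route in this commit. A sketch of how they could be wired in later (error handling elided; this is not the committed code):

@app.post("/webhook")
@limiter.limit("100/minute")
async def webhook(request: Request):
    webhook_requests.inc()                # count every incoming request
    with webhook_processing_time.time():  # observe processing wall-clock time
        payload = await request.json()
        response = await webhook_handler.process_webhook(
            payload=payload, access_token=ACCESS_TOKEN
        )
    return JSONResponse(content=response.__dict__, status_code=status.HTTP_200_OK)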
app/models/message_types.py ADDED
@@ -0,0 +1,23 @@
+ # message_types.py
+ from dataclasses import dataclass
+ from typing import Optional, Dict, List
+ from enum import Enum
+
+ class MediaType(Enum):
+     IMAGE = "image"
+     DOCUMENT = "document"
+     VIDEO = "video"
+
+ @dataclass
+ class MediaContent:
+     id: str
+     file_path: str
+     mime_type: Optional[str] = None
+     filename: Optional[str] = None
+
+ @dataclass
+ class Message:
+     id: str
+     sender_id: str
+     content: Optional[str]
+     media: Optional[Dict[MediaType, MediaContent]] = None
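Illustratively, a text message with one attached image would be represented as (placeholder values):

msg = Message(
    id="wamid.TEST",
    sender_id="15551234567",
    content="what is in this picture?",
    media={MediaType.IMAGE: MediaContent(id="123456", file_path="123456.jpg")},
)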
app/services/chat_manager.py ADDED
@@ -0,0 +1,19 @@
+ from typing import Dict, List
+
+ # chat_manager.py
+ class ChatManager:
+     def __init__(self):
+         self.user_chats: Dict[str, List[Dict]] = {}
+
+     def initialize_chat(self, sender_id: str) -> None:
+         if sender_id not in self.user_chats:
+             self.user_chats[sender_id] = [
+                 {"role": "user", "parts": "This is the chat history so far"}
+             ]
+
+     def append_message(self, sender_id: str, role: str, content: str) -> None:
+         if content:
+             self.user_chats[sender_id].append({"role": role, "parts": content})
+
+     def get_chat_history(self, sender_id: str) -> List[Dict]:
+         return self.user_chats.get(sender_id, [])
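The {"role": ..., "parts": ...} entries match the history format that google-generativeai's model.start_chat(history=...) accepts. A quick round trip:

chat_manager = ChatManager()
chat_manager.initialize_chat("15551234567")
chat_manager.append_message("15551234567", "user", "hello")
chat_manager.append_message("15551234567", "model", "Hi! How can I help?")
print(chat_manager.get_chat_history("15551234567"))
# [{'role': 'user', 'parts': 'This is the chat history so far'},
#  {'role': 'user', 'parts': 'hello'},
#  {'role': 'model', 'parts': 'Hi! How can I help?'}]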
app/services/message.py CHANGED
@@ -18,8 +18,6 @@ load_dotenv()

  genai.configure(api_key=GEMNI_API)
  client = AsyncOpenAI(api_key = OPENAI_API)
- # ACCESS_TOKEN = "EAAQNYsHYo2cBO30HZCtSkpRppS8ZA6Du2MV9PPFFepInIb2og5cjP1ZAZBJrJhHTlkmcrcN0BS6NZAqMltXhOZBzAyGdNaZCL6XtC6ZCdcug5JR5fZCdXVMzOBbuwPKPBwZABhFsWDIAORYaCI97ajvUxrZCSALNZAIH1E5hs2ZApETvWoVMiZCnoT5MqpyAZBoFFJ84K6vFgZDZD"
-
  # Configure logging
  logging.basicConfig(
      level=logging.INFO,
@@ -27,11 +25,6 @@ logging.basicConfig(
  )
  logger = logging.getLogger(__name__)

- # Define the WhatsApp API URL and Access Token
- # WHATSAPP_API_URL = os.environ.get("WHATSAPP_API_URL")
- # WHATSAPP_API_URL = "https://graph.facebook.com/v21.0/360447720489034/messages"
- # ACCESS_TOKEN = os.environ.get("ACCESS_TOKEN")
-
  # Validate environment variables
  if not WHATSAPP_API_URL or not ACCESS_TOKEN:
      logger.warning("Environment variables for WHATSAPP_API_URL or ACCESS_TOKEN are not set!")
@@ -77,88 +70,166 @@ async def generate_reply(sender: str, content: str, timestamp: int) -> str:
          logger.error(f"Error generating reply: {str(e)}", exc_info=True)
          return f"Sorry {sender}, I couldn't process your message. Please try again."

- # Process message with retry logic
- async def process_message_with_retry(
+ async def process_message_with_llm(
      sender_id: str,
      content: str,
-     history: List[str],
-     timestamp: Optional[int] = None,
-     media: Optional[Dict[str, Any]] = None,
+     history: List[Dict[str, str]],
      image_file_path: Optional[str] = None,
      doc_path: Optional[str] = None,
- ) -> Dict[str, Any]:
-     """Process message with retry logic"""
-     retries = 1
-     delay = 0.1 # Initial delay in seconds
-
-     # for attempt in range(retries):
+     video_file_path: Optional[str] = None,
+ ) -> str:
+     """Process message with retry logic."""
      try:
-         logger.info(f"Sending message to the Gemini model...")
-         generated_reply = await generate_response_from_gemini(sender = sender_id, content=content, history = history, timestamp = timestamp, image_file_path = image_file_path, media=media, doc_path = doc_path)
-         logger.info(f"Reply generated: {generated_reply}")
+         logger.info(f"Processing message for sender: {sender_id}")
+         generated_reply = await generate_response_from_gemini(
+             sender=sender_id,
+             content=content,
+             history=history,
+             image_file_path=image_file_path,
+             doc_path=doc_path,
+             video_file_path=video_file_path
+         )
+         logger.info(f"Generated reply: {generated_reply}")
+
          response = await send_reply(sender_id, generated_reply)
+         # return generated_reply
          return generated_reply
-         return {"status": "success", "reply": generated_reply, "response": response}
      except Exception as e:
-         logger.error(f"Error generating reply: {str(e)}", exc_info=True)
-         return {"status": "error", "reply": "Sorry, I couldn't generate a response at this time."}
-         # logger.error(f"Attempt {attempt + 1} failed: {str(e)}", exc_info=True)
-         # if attempt < retries - 1:
-         #     await asyncio.sleep(delay)
-         #     delay *= 2 # Exponential backoff
-         # else:
-         #     raise Exception(f"All {retries} attempts failed.") from e
-
- # Example usage
- # asyncio.run(process_message_with_retry("1234567890", "hello", 1700424056000))
-
-
- async def generate_response_from_gemini(sender: str, content: str, timestamp: str, history: List[Dict[str, str]], media: Optional[Dict[str, Any]] = None, image_file_path: Optional[str] = None, doc_path: Optional[str] = None) -> str:
+         logger.error(f"Error in process_message_with_retry: {str(e)}", exc_info=True)
+         return "Sorry, I couldn't generate a response at this time."
+
+ async def generate_response_from_gemini(
+     sender: str,
+     content: str,
+     history: List[Dict[str, str]],
+     image_file_path: Optional[str] = None,
+     doc_path: Optional[str] = None,
+     video_file_path: Optional[str] = None,
+ ) -> str:
      try:
-         print(f"Sender: {sender}")
-         print(f"Content: {content}")
-         print(f"Timestamp: {timestamp}")
-         print(f"History: {history}")
-         print(f"Media: {media}")
+         logger.info(f"Generating response for sender: {sender}")

          # Initialize the model
          model = genai.GenerativeModel("gemini-1.5-pro-002")

-         # Define the chat history
-         chat = model.start_chat(
-             history=history
-         )
-         logger.info(f"file_path: {image_file_path}")
-         if image_file_path: # Should be bytes or a file-like object
-
+         # Start chat with history
+         chat = model.start_chat(history=history)

-             prompt = "Describe the following image:"
+         # Process image
+         if image_file_path:
+             logger.info(f"Processing image at {image_file_path}")
              image_data = PIL.Image.open(image_file_path)
-
-             print("Sending image to the Gemini model...")
              response = await chat.send_message_async(image_data)
-             print(f"Model response: {response.text}")
              return response.text
-
+
+         # Process document
          if doc_path:
+             logger.info(f"Processing document at {doc_path}")
              doc_data = genai.upload_file(doc_path)
-             print("Sending document to the Gemini model...")
              response = await chat.send_message_async(doc_data)
-             print(f"Model response: {response.text}")
              return response.text

+         # Process video (if supported)
+         if video_file_path:
+             logger.info(f"Processing video at {video_file_path}")
+             video_data = genai.upload_file(video_file_path)
+             response = await chat.send_message_async(video_data)
+             return response.text
+             # Implement video processing logic here
+             pass # Placeholder for video processing logic
+
          # Send the user's message
-         print("Sending message to the Gemini model...")
          response = await chat.send_message_async(content)
-         print(f"Model response: {response.text}")
-
          return response.text

      except Exception as e:
-         print("Error generating reply from Gemini:", e)
+         logger.error("Error in generate_response_from_gemini:", exc_info=True)
          return "Sorry, I couldn't generate a response at this time."


+
+ # Process message with retry logic
+ # async def process_message_with_retry(
+ #     sender_id: str,
+ #     content: str,
+ #     history: List[str],
+ #     timestamp: Optional[int] = None,
+ #     media: Optional[Dict[str, Any]] = None,
+ #     image_file_path: Optional[str] = None,
+ #     doc_path: Optional[str] = None,
+ # ) -> Dict[str, Any]:
+ #     """Process message with retry logic"""
+ #     retries = 1
+ #     delay = 0.1 # Initial delay in seconds
+
+ #     # for attempt in range(retries):
+ #     try:
+ #         logger.info(f"Sending message to the Gemini model...")
+ #         generated_reply = await generate_response_from_gemini(sender = sender_id, content=content, history = history, timestamp = timestamp, image_file_path = image_file_path, media=media, doc_path = doc_path)
+ #         logger.info(f"Reply generated: {generated_reply}")
+ #         response = await send_reply(sender_id, generated_reply)
+ #         return generated_reply
+ #         return {"status": "success", "reply": generated_reply, "response": response}
+ #     except Exception as e:
+ #         logger.error(f"Error generating reply: {str(e)}", exc_info=True)
+ #         return {"status": "error", "reply": "Sorry, I couldn't generate a response at this time."}
+ #         # logger.error(f"Attempt {attempt + 1} failed: {str(e)}", exc_info=True)
+ #         # if attempt < retries - 1:
+ #         #     await asyncio.sleep(delay)
+ #         #     delay *= 2 # Exponential backoff
+ #         # else:
+ #         #     raise Exception(f"All {retries} attempts failed.") from e
+
+ # Example usage
+ # asyncio.run(process_message_with_retry("1234567890", "hello", 1700424056000))
+
+
+ # async def generate_response_from_gemini(sender: str, content: str, timestamp: str, history: List[Dict[str, str]], media: Optional[Dict[str, Any]] = None, image_file_path: Optional[str] = None, doc_path: Optional[str] = None) -> str:
+ #     try:
+ #         print(f"Sender: {sender}")
+ #         print(f"Content: {content}")
+ #         print(f"Timestamp: {timestamp}")
+ #         print(f"History: {history}")
+ #         print(f"Media: {media}")
+
+ #         # Initialize the model
+ #         model = genai.GenerativeModel("gemini-1.5-pro-002")
+
+ #         # Define the chat history
+ #         chat = model.start_chat(
+ #             history=history
+ #         )
+ #         logger.info(f"file_path: {image_file_path}")
+ #         if image_file_path: # Should be bytes or a file-like object
+
+
+ #             prompt = "Describe the following image:"
+ #             image_data = PIL.Image.open(image_file_path)
+
+ #             print("Sending image to the Gemini model...")
+ #             response = await chat.send_message_async(image_data)
+ #             print(f"Model response: {response.text}")
+ #             return response.text
+
+ #         if doc_path:
+ #             doc_data = genai.upload_file(doc_path)
+ #             print("Sending document to the Gemini model...")
+ #             response = await chat.send_message_async(doc_data)
+ #             print(f"Model response: {response.text}")
+ #             return response.text
+
+ #         # Send the user's message
+ #         print("Sending message to the Gemini model...")
+ #         response = await chat.send_message_async(content)
+ #         print(f"Model response: {response.text}")
+
+ #         return response.text
+
+ #     except Exception as e:
+ #         print("Error generating reply from Gemini:", e)
+ #         return "Sorry, I couldn't generate a response at this time."
+
+
  async def generate_response_from_chatgpt(sender: str, content: str, timestamp: str, history: str) -> str:
      """
      Generate a reply using OpenAI's ChatGPT API.
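One caveat with the document/video paths above: genai.upload_file returns immediately, while the Gemini File API may still be processing the upload (videos in particular), so referencing the file in a message can fail until it reaches the ACTIVE state. A polling sketch based on that File API pattern (not part of this commit):

import time
import google.generativeai as genai

def upload_and_wait(path: str, timeout_s: float = 120.0):
    # Upload, then poll until server-side processing finishes.
    uploaded = genai.upload_file(path)
    deadline = time.monotonic() + timeout_s
    while uploaded.state.name == "PROCESSING" and time.monotonic() < deadline:
        time.sleep(2)
        uploaded = genai.get_file(uploaded.name)
    if uploaded.state.name != "ACTIVE":
        raise RuntimeError(f"File {uploaded.name} not ready: {uploaded.state.name}")
    return uploaded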
app/services/message_parser.py ADDED
@@ -0,0 +1,36 @@
+ # message_parser.py
+
+ from app.models.message_types import Message, MediaContent, MediaType
+
+ class MessageParser:
+     @staticmethod
+     def parse(raw_message: dict) -> Message:
+         media = {}
+
+         if image := raw_message.get("image"):
+             media[MediaType.IMAGE] = MediaContent(
+                 id=image["id"],
+                 file_path=f"{image['id']}.jpg"
+             )
+
+         if document := raw_message.get("document"):
+             media[MediaType.DOCUMENT] = MediaContent(
+                 id=document["id"],
+                 file_path=document.get("filename"),
+                 filename=document.get("filename")
+             )
+
+         if video := raw_message.get("video"):
+             mime_type = video.get("mime_type")
+             media[MediaType.VIDEO] = MediaContent(
+                 id=video["id"],
+                 file_path=f"{video['id']}.{mime_type.split('/')[-1]}",
+                 mime_type=mime_type
+             )
+
+         return Message(
+             id=raw_message["id"],
+             sender_id=raw_message["from"],
+             content=raw_message.get("text", {}).get("body"),
+             media=media if media else None
+         )
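Given a raw WhatsApp message dict, parse() maps each attachment into the media dict keyed by MediaType; image filenames are derived from the media id, while documents reuse their original filename. With placeholder values:

raw = {
    "id": "wamid.TEST",
    "from": "15551234567",
    "text": {"body": "see attached"},
    "image": {"id": "987654", "mime_type": "image/jpeg"},
}
message = MessageParser.parse(raw)
print(message.media[MediaType.IMAGE].file_path)  # 987654.jpg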
app/services/webhook_handler.py CHANGED
@@ -3,7 +3,7 @@ from fastapi import Request, status
  from fastapi.responses import JSONResponse, Response
  from fastapi.exceptions import HTTPException
  from app.services.cache import MessageCache
- from app.services.message import process_message_with_retry
+
  from app.services.download_media import download_whatsapp_media
  from app.utils.handle_message import handle_message
  from app.utils.load_env import ACCESS_TOKEN
@@ -13,39 +13,40 @@ logger = logging.getLogger(__name__)
  message_cache = MessageCache()
  user_chats = {}

- async def webhook(request: Request):
-     request_id = f"req_{int(time.time()*1000)}"
-
-     payload = await request.json()
-     logger.info(f"Processing webhook request {payload}")
-     processed_count = 0
-     error_count = 0
-     results = []
-
-     entries = payload.get("entry", [])
-     for entry in entries:
-         entry_id = entry.get("id")
-         logger.info(f"Processing entry_id: {entry_id}")
-
-         changes = entry.get("changes", [])
-         for change in changes:
-             messages = change.get("value", {}).get("messages", [])
-             media = {}
-             for message in messages:
-                 logger.info(f"Processing message: {message}")
-                 response = await handle_message(message=message, user_chats = user_chats, message_cache = message_cache, access_token = ACCESS_TOKEN)
-
-                 results.append(response)
-
-     response_data = {
-         "request_id": request_id,
-         # "processed": processed_count,
-         # "errors": error_count,
-         "results": results,
-     }
-
-     logger.info(f"Webhook processing completed - Processed: {processed_count}, Errors: {error_count}")
-     return JSONResponse(content=response_data, status_code=status.HTTP_200_OK)
+
+ # async def webhook(request: Request):
+ #     request_id = f"req_{int(time.time()*1000)}"
+
+ #     payload = await request.json()
+ #     logger.info(f"Processing webhook request {payload}")
+ #     processed_count = 0
+ #     error_count = 0
+ #     results = []
+
+ #     entries = payload.get("entry", [])
+ #     for entry in entries:
+ #         entry_id = entry.get("id")
+ #         logger.info(f"Processing entry_id: {entry_id}")
+
+ #         changes = entry.get("changes", [])
+ #         for change in changes:
+ #             messages = change.get("value", {}).get("messages", [])
+ #             media = {}
+ #             for message in messages:
+ #                 logger.info(f"Processing message: {message}")
+ #                 response = await handle_message(message=message, user_chats = user_chats, message_cache = message_cache, access_token = ACCESS_TOKEN)
+
+ #                 results.append(response)
+
+ #     response_data = {
+ #         "request_id": request_id,
+ #         # "processed": processed_count,
+ #         # "errors": error_count,
+ #         "results": results,
+ #     }
+
+ #     logger.info(f"Webhook processing completed - Processed: {processed_count}, Errors: {error_count}")
+ #     return JSONResponse(content=response_data, status_code=status.HTTP_200_OK)

  async def verify_webhook(request: Request):
      mode = request.query_params.get('hub.mode')
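The rest of verify_webhook is not shown in this diff. For reference, the standard Meta webhook verification handshake echoes hub.challenge back when the verify token matches; a sketch assuming a VERIFY_TOKEN config value (not shown in this commit):

async def verify_webhook(request: Request):
    mode = request.query_params.get('hub.mode')
    token = request.query_params.get('hub.verify_token')
    challenge = request.query_params.get('hub.challenge')
    if mode == "subscribe" and token == VERIFY_TOKEN:  # VERIFY_TOKEN: assumed config
        return Response(content=challenge, media_type="text/plain")
    return Response(status_code=status.HTTP_403_FORBIDDEN)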
app/utils/handle_message.py CHANGED
@@ -1,5 +1,5 @@
  from app.services.download_media import download_whatsapp_media
- from app.services.message import process_message_with_retry
+ from app.services.message import process_message_with_llm
  from app.services.cache import MessageCache
  import logging

@@ -61,7 +61,7 @@ async def handle_message(message, user_chats, message_cache, access_token):


      # Process the message
-     result = await process_message_with_retry(sender_id, content, user_chats[sender_id], image_file_path = image_file_path, doc_path = document_file_path)
+     result = await process_message_with_llm(sender_id, content, user_chats[sender_id], image_file_path = image_file_path, doc_path = document_file_path)

      if content != None:
          user_chats[sender_id].append({"role": "user", "parts": content})
testcode.py CHANGED
@@ -0,0 +1,4 @@
+ import PIL.Image
+
+ organ = PIL.Image.open("organ.jpg")
+ print(organ)