Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -10,7 +10,8 @@ from admin_messages import extract_admin_message, save_admin_message
|
|
10 |
|
11 |
app = Flask(__name__)
|
12 |
|
13 |
-
|
|
|
14 |
|
15 |
chat_histories = {}
|
16 |
|
@@ -33,6 +34,47 @@ def verify_webhook():
|
|
33 |
print(f"Verification failed: mode={mode}, token={token}") # Debug log
|
34 |
return "Verification failed", 403
|
35 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
36 |
@app.route("/webhook", methods=["POST"])
|
37 |
def handle_message():
|
38 |
body = request.get_json()
|
@@ -97,24 +139,8 @@ def handle_message():
|
|
97 |
sender_id = messaging_event["sender"]["id"]
|
98 |
message = messaging_event["message"]
|
99 |
|
100 |
-
#
|
101 |
-
user_full_name =
|
102 |
-
|
103 |
-
try:
|
104 |
-
proxy_params = {
|
105 |
-
"sender_id": sender_id,
|
106 |
-
"page_token": page_token
|
107 |
-
}
|
108 |
-
proxy_response = requests.get(PROXY_API_URL, params=proxy_params)
|
109 |
-
proxy_response.raise_for_status()
|
110 |
-
proxy_data = proxy_response.json()
|
111 |
-
|
112 |
-
if proxy_data["success"]:
|
113 |
-
user_full_name = proxy_data["full_name"]
|
114 |
-
else:
|
115 |
-
print(f"Proxy API error: {proxy_data['error']}")
|
116 |
-
except Exception as e:
|
117 |
-
print(f"Error calling proxy API: {str(e)}")
|
118 |
|
119 |
# Get or create chat history for this user
|
120 |
user_history = chat_histories.setdefault(sender_id, {
|
@@ -137,7 +163,7 @@ def handle_message():
|
|
137 |
if attachment["type"] == "image":
|
138 |
image_url = attachment["payload"]["url"]
|
139 |
try:
|
140 |
-
img_response = requests.get(image_url)
|
141 |
img_response.raise_for_status()
|
142 |
temp_path = f"temp_{uuid.uuid4()}.jpg"
|
143 |
with open(temp_path, "wb") as f:
|
@@ -154,7 +180,7 @@ def handle_message():
|
|
154 |
elif attachment["type"] == "audio":
|
155 |
audio_url = attachment["payload"]["url"]
|
156 |
try:
|
157 |
-
audio_response = requests.get(audio_url)
|
158 |
audio_response.raise_for_status()
|
159 |
|
160 |
# Extract file extension from the original filename
|
@@ -214,43 +240,66 @@ def handle_message():
|
|
214 |
while len(user_history['history']) > 5:
|
215 |
user_history['history'].pop(0)
|
216 |
|
217 |
-
|
218 |
-
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
|
224 |
-
|
225 |
-
|
226 |
-
|
227 |
-
|
228 |
-
|
229 |
-
|
230 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
231 |
|
232 |
-
|
233 |
-
|
234 |
-
|
235 |
-
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
243 |
-
|
244 |
-
|
245 |
-
|
246 |
-
|
247 |
-
|
248 |
-
payload = {
|
249 |
-
"recipient_id": sender_id,
|
250 |
-
"message_text": response_to_send,
|
251 |
-
"page_access_token": page_token
|
252 |
-
}
|
253 |
-
requests.post(AI_SERVICE_URL, json=payload, headers=headers)
|
254 |
|
255 |
return "OK", 200
|
256 |
|
|
|
10 |
|
11 |
app = Flask(__name__)
|
12 |
|
13 |
+
# Cache for user profiles to reduce API calls
|
14 |
+
user_profile_cache = {}
|
15 |
|
16 |
chat_histories = {}
|
17 |
|
|
|
34 |
print(f"Verification failed: mode={mode}, token={token}") # Debug log
|
35 |
return "Verification failed", 403
|
36 |
|
37 |
+
def get_user_profile(sender_id, page_token):
    """Look up the user's display name via the proxy API, with caching and fallbacks.

    Args:
        sender_id: Messenger sender id of the user.
        page_token: Page access token forwarded to the proxy service.

    Returns:
        The full name reported by the proxy service, or the generic fallback
        "User" when the lookup fails for any reason (timeout, non-200 status,
        unsuccessful payload, malformed JSON). Successful lookups are memoized
        in the module-level ``user_profile_cache``.
    """
    # Key on the sender AND the full token so two pages whose tokens happen to
    # share a prefix can never serve each other's cached names. (Previously the
    # key used page_token[:10], which could collide across distinct tokens.)
    cache_key = (sender_id, page_token)
    if cache_key in user_profile_cache:
        print(f"Using cached profile for {sender_id}")
        return user_profile_cache[cache_key]

    # Default fallback name used whenever the proxy lookup fails.
    user_full_name = "User"

    try:
        proxy_params = {
            "sender_id": sender_id,
            "page_token": page_token
        }

        # Timeout keeps the webhook handler from hanging on a slow proxy.
        proxy_response = requests.get(PROXY_API_URL, params=proxy_params, timeout=5)

        # Non-200 responses are tolerated: log and keep the default name.
        if proxy_response.status_code == 200:
            proxy_data = proxy_response.json()
            if proxy_data.get("success", False):
                user_full_name = proxy_data.get("full_name", "User")
                # Only successful lookups are cached, so transient failures
                # get retried on the next incoming message.
                user_profile_cache[cache_key] = user_full_name
            else:
                print(f"Proxy API returned error: {proxy_data.get('error', 'Unknown error')}")
        else:
            print(f"Proxy API returned status code: {proxy_response.status_code}")

    except requests.exceptions.Timeout:
        print(f"Timeout when calling proxy API for user {sender_id}")
    except requests.exceptions.RequestException as e:
        print(f"Error calling proxy API: {str(e)}")
    except Exception as e:
        # Last-resort guard (e.g. response body is not valid JSON) — a profile
        # lookup failure must never break message handling.
        print(f"Unexpected error getting user profile: {str(e)}")

    return user_full_name
|
77 |
+
|
78 |
@app.route("/webhook", methods=["POST"])
|
79 |
def handle_message():
|
80 |
body = request.get_json()
|
|
|
139 |
sender_id = messaging_event["sender"]["id"]
|
140 |
message = messaging_event["message"]
|
141 |
|
142 |
+
# Get user profile with improved error handling
|
143 |
+
user_full_name = get_user_profile(sender_id, page_token)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
144 |
|
145 |
# Get or create chat history for this user
|
146 |
user_history = chat_histories.setdefault(sender_id, {
|
|
|
163 |
if attachment["type"] == "image":
|
164 |
image_url = attachment["payload"]["url"]
|
165 |
try:
|
166 |
+
img_response = requests.get(image_url, timeout=10)
|
167 |
img_response.raise_for_status()
|
168 |
temp_path = f"temp_{uuid.uuid4()}.jpg"
|
169 |
with open(temp_path, "wb") as f:
|
|
|
180 |
elif attachment["type"] == "audio":
|
181 |
audio_url = attachment["payload"]["url"]
|
182 |
try:
|
183 |
+
audio_response = requests.get(audio_url, timeout=10)
|
184 |
audio_response.raise_for_status()
|
185 |
|
186 |
# Extract file extension from the original filename
|
|
|
240 |
while len(user_history['history']) > 5:
|
241 |
user_history['history'].pop(0)
|
242 |
|
243 |
+
try:
|
244 |
+
# Generate AI response using the dynamically updated system prompt
|
245 |
+
ai_response = get_qwen_response(
|
246 |
+
system_prompt=user_history['system_prompt'],
|
247 |
+
chat_history=user_history['history']
|
248 |
+
)
|
249 |
+
|
250 |
+
# Check if the AI response contains an admin message
|
251 |
+
admin_message, cleaned_response = extract_admin_message(ai_response)
|
252 |
+
|
253 |
+
# Always add the original response to chat history (with admin tags if present)
|
254 |
+
user_history['history'].append({
|
255 |
+
"role": "assistant",
|
256 |
+
"content": [{"type": "text", "text": ai_response}]
|
257 |
+
})
|
258 |
+
|
259 |
+
# If there's an admin message, save it to the GitHub JSON file
|
260 |
+
if admin_message:
|
261 |
+
print(f"Admin message detected: {admin_message}")
|
262 |
+
save_admin_message(page_id, admin_message, sender_id, user_full_name)
|
263 |
+
|
264 |
+
# Send the cleaned response (without admin tags) to the user
|
265 |
+
response_to_send = cleaned_response
|
266 |
+
else:
|
267 |
+
# No admin message, send the original response
|
268 |
+
response_to_send = ai_response
|
269 |
+
|
270 |
+
# Send the appropriate response back to the user
|
271 |
+
try:
|
272 |
+
headers = {
|
273 |
+
"Authorization": f"Bearer {AI_SERVICE_TOKEN}",
|
274 |
+
"Content-Type": "application/json"
|
275 |
+
}
|
276 |
+
payload = {
|
277 |
+
"recipient_id": sender_id,
|
278 |
+
"message_text": response_to_send,
|
279 |
+
"page_access_token": page_token
|
280 |
+
}
|
281 |
+
send_response = requests.post(AI_SERVICE_URL, json=payload, headers=headers, timeout=10)
|
282 |
+
send_response.raise_for_status()
|
283 |
+
print(f"Response sent successfully to {sender_id}")
|
284 |
+
except Exception as e:
|
285 |
+
print(f"Error sending response to user: {str(e)}")
|
286 |
|
287 |
+
except Exception as e:
|
288 |
+
print(f"Error generating AI response: {str(e)}")
|
289 |
+
# Send a fallback message if AI response generation fails
|
290 |
+
try:
|
291 |
+
headers = {
|
292 |
+
"Authorization": f"Bearer {AI_SERVICE_TOKEN}",
|
293 |
+
"Content-Type": "application/json"
|
294 |
+
}
|
295 |
+
payload = {
|
296 |
+
"recipient_id": sender_id,
|
297 |
+
"message_text": "I'm sorry, I'm having trouble processing your request right now. Please try again later.",
|
298 |
+
"page_access_token": page_token
|
299 |
+
}
|
300 |
+
requests.post(AI_SERVICE_URL, json=payload, headers=headers, timeout=10)
|
301 |
+
except Exception as send_err:
|
302 |
+
print(f"Error sending fallback message: {str(send_err)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
303 |
|
304 |
return "OK", 200
|
305 |
|