# filename: app_openai_updated.py
import gradio as gr
import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt # Not directly used for plotting
import plotly.graph_objects as go
import plotly.express as px
from datetime import datetime, timedelta
import random
import json
import os
import time
import requests
from typing import List, Dict, Any, Optional
import logging
from dotenv import load_dotenv
# import pytz # Not used
import uuid
import re
# import base64 # Not used
# from io import BytesIO # Not used
# from PIL import Image # Not used

# --- Use OpenAI library ---
import openai

# --- Load environment variables ---
load_dotenv()

# --- Set up logging ---
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# --- Configure API keys ---
# Make sure you have OPENAI_API_KEY and SERPER_API_KEY in your .env file or environment
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
SERPER_API_KEY = os.getenv("SERPER_API_KEY")

if not OPENAI_API_KEY:
    logger.warning("OPENAI_API_KEY not found. AI features will not work.")
    # You might want to raise an error or handle this case gracefully
if not SERPER_API_KEY:
    logger.warning("SERPER_API_KEY not found. Web search features will not work.")

# --- Initialize the OpenAI client ---
try:
    client = openai.OpenAI(api_key=OPENAI_API_KEY)
    # Test connection (optional, uncomment to test during startup)
    # client.models.list()
    logger.info("OpenAI client initialized successfully.")
except Exception as e:
    logger.error(f"Failed to initialize OpenAI client: {e}")
    # Handle error appropriately, maybe exit or set client to None
    client = None

# --- Model configuration ---
MODEL_ID = "gpt-4o" # Use OpenAI GPT-4o model

# --- Constants ---
EMOTIONS = ["Unmotivated", "Anxious", "Confused", "Excited", "Overwhelmed", "Discouraged"]
GOAL_TYPES = ["Get a job at a big company", "Find an internship", "Change careers", "Improve skills", "Network better"]
USER_DB_PATH = "user_database.json"
RESUME_FOLDER = "user_resumes"
PORTFOLIO_FOLDER = "user_portfolios"

# Ensure folders exist
os.makedirs(RESUME_FOLDER, exist_ok=True)
os.makedirs(PORTFOLIO_FOLDER, exist_ok=True)

# --- Tool Definitions for OpenAI ---
# Define functions that the AI can call.
# These will be implemented as Python functions below.

tools_list = [
    {
        "type": "function",
        "function": {
            "name": "get_job_opportunities",
            "description": "Search for relevant job opportunities based on query, location, and career goals using web search.",
            "parameters": {
                "type": "object",
                "properties": {
                     "query": {
                        "type": "string",
                        "description": "The specific job title, keyword, or role the user is searching for.",
                    },
                    "location": {
                        "type": "string",
                        "description": "The city, region, or country where the user wants to search for jobs.",
                    },
                    "max_results": {
                        "type": "integer",
                        "description": "Maximum number of job opportunities to return (default 5).",
                    },
                },
                "required": ["query", "location"],
            },
        }
    },
    {
        "type": "function",
        "function": {
            "name": "generate_document_template",
            "description": "Generate a document template (like a resume or cover letter) based on type, career field, and experience level.",
            "parameters": {
                "type": "object",
                "properties": {
                    "document_type": {
                        "type": "string",
                        "description": "Type of document (e.g., Resume, Cover Letter, Self-introduction).",
                    },
                    "career_field": {
                        "type": "string",
                        "description": "The career field or industry.",
                    },
                    "experience_level": {
                        "type": "string",
                        "description": "User's experience level (e.g., Entry, Mid, Senior).",
                    },
                },
                "required": ["document_type"],
            },
        }
    },
    {
        "type": "function",
        "function": {
            "name": "create_personalized_routine",
            "description": "Create a personalized daily or weekly career development routine based on the user's current emotion, goals, and available time.",
            "parameters": {
                "type": "object",
                "properties": {
                    "emotion": {
                        "type": "string",
                        "description": "User's current primary emotional state (e.g., Unmotivated, Anxious).",
                    },
                    "goal": {
                        "type": "string",
                        "description": "User's specific career goal for this routine.",
                    },
                    "available_time_minutes": {
                        "type": "integer",
                        "description": "Available time in minutes per day (default 60).",
                    },
                    "routine_length_days": {
                        "type": "integer",
                        "description": "Length of the routine in days (default 7).",
                    },
                },
                "required": ["emotion", "goal"],
            },
        }
    },
    {
        "type": "function",
        "function": {
            "name": "analyze_resume",
            "description": "Analyze the provided resume text and provide feedback, comparing it against the user's stated career goal.",
            "parameters": {
                "type": "object",
                "properties": {
                    "resume_text": {
                        "type": "string",
                        "description": "The full text of the user's resume.",
                    },
                    "career_goal": {
                        "type": "string",
                        "description": "The user's career goal or target job/industry to analyze against.",
                    },
                },
                "required": ["resume_text", "career_goal"],
            },
        }
    },
    {
        "type": "function",
        "function": {
            "name": "analyze_portfolio",
            "description": "Analyze a user's portfolio based on a URL (if provided) and a description, offering feedback relative to their career goal.",
            "parameters": {
                "type": "object",
                "properties": {
                    "portfolio_url": {
                        "type": "string",
                        "description": "URL to the user's online portfolio (optional).",
                    },
                    "portfolio_description": {
                        "type": "string",
                        "description": "Detailed description of the portfolio's content, purpose, and structure.",
                    },
                    "career_goal": {
                        "type": "string",
                        "description": "The user's career goal or target job/industry to analyze against.",
                    },
                },
                "required": ["portfolio_description", "career_goal"],
            },
        }
    },
    {
        "type": "function",
        "function": {
            "name": "extract_and_rate_skills_from_resume",
            "description": "Extracts key skills from resume text and rates them on a scale of 1-10 based on apparent proficiency shown in the resume.",
            "parameters": {
                "type": "object",
                "properties": {
                    "resume_text": {
                        "type": "string",
                        "description": "The full text of the user's resume.",
                    },
                     "max_skills": {
                        "type": "integer",
                        "description": "Maximum number of skills to extract (default 8).",
                    },
                },
                "required": ["resume_text"],
            },
        }
    }
]

# --- User Database Functions (Unchanged, adapted for history format if needed) ---
# [Previous database functions load_user_database, save_user_database, get_user_profile, update_user_profile, etc. remain largely the same]
# Ensure chat history format matches OpenAI's expected {role: 'user'/'assistant', content: 'message'}

def load_user_database():
    """Load user database from JSON file or create if it doesn't exist"""
    try:
        with open(USER_DB_PATH, 'r') as file:
            db = json.load(file)
            # Ensure chat history uses 'content' key for OpenAI compatibility
            for user_id in db.get('users', {}):
                if 'chat_history' not in db['users'][user_id]:
                    db['users'][user_id]['chat_history'] = []
                else:
                    # Convert old format if necessary
                    for msg in db['users'][user_id]['chat_history']:
                        if 'message' in msg and 'content' not in msg:
                            msg['content'] = msg.pop('message')
            return db
    except (FileNotFoundError, json.JSONDecodeError):
        db = {'users': {}}
        save_user_database(db)
        return db

def save_user_database(db):
    """Save user database to JSON file"""
    with open(USER_DB_PATH, 'w') as file:
        json.dump(db, file, indent=4)

def get_user_profile(user_id):
    """Get user profile from database or create new one"""
    db = load_user_database()
    if user_id not in db['users']:
        db['users'][user_id] = {
            "user_id": user_id,
            "name": "",
            "location": "",
            "current_emotion": "",
            "career_goal": "",
            "progress_points": 0,
            "completed_tasks": [],
            "upcoming_events": [],
            "routine_history": [],
            "daily_emotions": [],
            "resume_path": "",
            "portfolio_path": "",
            "recommendations": [],
            "chat_history": [], # Initialize chat history
            "joined_date": datetime.now().strftime("%Y-%m-%d")
        }
        save_user_database(db)
    # Ensure chat history uses 'content' key
    elif 'chat_history' not in db['users'][user_id] or \
         (db['users'][user_id]['chat_history'] and 'content' not in db['users'][user_id]['chat_history'][0]):
        if 'chat_history' not in db['users'][user_id]:
             db['users'][user_id]['chat_history'] = []
        else:
             for msg in db['users'][user_id]['chat_history']:
                 if 'message' in msg and 'content' not in msg:
                     msg['content'] = msg.pop('message')
        save_user_database(db)

    return db['users'][user_id]

def update_user_profile(user_id, updates):
    """Update user profile with new information"""
    db = load_user_database()
    if user_id in db['users']:
        for key, value in updates.items():
            db['users'][user_id][key] = value
        save_user_database(db)
    return db['users'][user_id]

def add_task_to_user(user_id, task):
    """Add a new task to user's completed tasks"""
    db = load_user_database()
    if user_id in db['users']:
        if 'completed_tasks' not in db['users'][user_id]:
            db['users'][user_id]['completed_tasks'] = []

        task_with_date = {
            "task": task,
            "date": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        }
        db['users'][user_id]['completed_tasks'].append(task_with_date)
        db['users'][user_id]['progress_points'] += random.randint(10, 25) # Keep random points for now
        save_user_database(db)
    return db['users'][user_id]

def add_emotion_record(user_id, emotion):
    """Add a new emotion record to user's daily emotions"""
    db = load_user_database()
    if user_id in db['users']:
        if 'daily_emotions' not in db['users'][user_id]:
            db['users'][user_id]['daily_emotions'] = []

        emotion_record = {
            "emotion": emotion,
            "date": datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        }
        db['users'][user_id]['daily_emotions'].append(emotion_record)
        db['users'][user_id]['current_emotion'] = emotion # Update current emotion
        save_user_database(db)
    return db['users'][user_id]

def add_routine_to_user(user_id, routine):
    """Add a new routine to user's routine history"""
    db = load_user_database()
    if user_id in db['users']:
        if 'routine_history' not in db['users'][user_id]:
            db['users'][user_id]['routine_history'] = []

        routine_with_date = {
            "routine": routine, # The AI generated routine JSON
            "start_date": datetime.now().strftime("%Y-%m-%d"),
            "end_date": (datetime.now() + timedelta(days=routine.get('days', 7))).strftime("%Y-%m-%d"),
            "completion": 0 # Start completion at 0
        }
        # Prepend to make the latest routine first (optional)
        db['users'][user_id]['routine_history'].insert(0, routine_with_date)
        save_user_database(db)
    return db['users'][user_id]


def save_user_resume(user_id, resume_text):
    """Save user's resume text to file and update profile path."""
    if not resume_text: return None
    filename = f"{user_id}_resume.txt"
    filepath = os.path.join(RESUME_FOLDER, filename)
    try:
        with open(filepath, 'w', encoding='utf-8') as file:
            file.write(resume_text)
        update_user_profile(user_id, {"resume_path": filepath})
        logger.info(f"Resume saved for user {user_id} at {filepath}")
        return filepath
    except Exception as e:
        logger.error(f"Error saving resume for user {user_id}: {e}")
        return None

def save_user_portfolio(user_id, portfolio_url, portfolio_description):
    """Save user's portfolio info (URL and description) to file."""
    if not portfolio_description: return None
    filename = f"{user_id}_portfolio.json"
    filepath = os.path.join(PORTFOLIO_FOLDER, filename)
    portfolio_content = {
        "url": portfolio_url,
        "description": portfolio_description,
        "saved_date": datetime.now().isoformat()
    }
    try:
        with open(filepath, 'w', encoding='utf-8') as file:
            json.dump(portfolio_content, file, indent=4)
        update_user_profile(user_id, {"portfolio_path": filepath})
        logger.info(f"Portfolio info saved for user {user_id} at {filepath}")
        return filepath
    except Exception as e:
        logger.error(f"Error saving portfolio info for user {user_id}: {e}")
        return None


def add_recommendation_to_user(user_id, recommendation):
    """Add a new recommendation object to user's list"""
    db = load_user_database()
    if user_id in db['users']:
        if 'recommendations' not in db['users'][user_id]:
            db['users'][user_id]['recommendations'] = []

        recommendation_with_date = {
            "recommendation": recommendation, # The AI generated recommendation object
            "date": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "status": "pending"  # pending, completed, dismissed
        }
        # Add to the beginning of the list
        db['users'][user_id]['recommendations'].insert(0, recommendation_with_date)
        # Optional: Limit the number of stored recommendations
        max_recs = 20
        if len(db['users'][user_id]['recommendations']) > max_recs:
            db['users'][user_id]['recommendations'] = db['users'][user_id]['recommendations'][:max_recs]

        save_user_database(db)
    return db['users'][user_id]

def add_chat_message(user_id, role, content):
    """Add a message to the user's chat history using OpenAI format."""
    db = load_user_database()
    if user_id in db['users']:
        if 'chat_history' not in db['users'][user_id]:
            db['users'][user_id]['chat_history'] = []

        # Basic validation
        if role not in ['user', 'assistant', 'system', 'tool']:
            logger.warning(f"Invalid role '{role}' provided for chat message.")
            return db['users'][user_id]
        if not content and role != 'tool': # Tool messages can have null content initially
             logger.warning(f"Empty content provided for chat role '{role}'.")
             # return db['users'][user_id] # Allow empty content for now?

        chat_message = {
            "role": role,
            "content": content, # Use 'content' key
            "timestamp": datetime.now().isoformat() # Use ISO format
        }
        db['users'][user_id]['chat_history'].append(chat_message)

        # Optional: Limit chat history length
        max_history = 50 # Keep last 50 messages (user + assistant)
        if len(db['users'][user_id]['chat_history']) > max_history:
             # Keep system prompt + last N messages
             system_msgs = [m for m in db['users'][user_id]['chat_history'] if m['role'] == 'system']
             other_msgs = [m for m in db['users'][user_id]['chat_history'] if m['role'] != 'system']
             db['users'][user_id]['chat_history'] = system_msgs + other_msgs[-max_history:]

        save_user_database(db)
    return db['users'][user_id]

# --- Tool Implementation Functions ---
# These functions are called when the AI decides to use a tool.

def get_job_opportunities(query: str, location: str, max_results: int = 5) -> str:
    """
    Searches for job opportunities using the Serper API based on a query and location.
    Returns a JSON string of the search results or an error message.
    """
    logger.info(f"Executing tool: get_job_opportunities(query='{query}', location='{location}', max_results={max_results})")
    if not SERPER_API_KEY:
        return json.dumps({"error": "Serper API key is not configured."})

    try:
        headers = {
            'X-API-KEY': SERPER_API_KEY,
            'Content-Type': 'application/json'
        }
        payload = {
            'q': f"{query} jobs in {location}",
            'num': max_results,
            'location': location  # Pass the location explicitly if the API supports it
        }
        logger.info(f"Calling Serper API with payload: {payload}")
        response = requests.post(
            'https://google.serper.dev/search',  # Serper's search endpoint expects a POST with a JSON body
            headers=headers,
            json=payload,
            timeout=10  # Avoid hanging on a slow search response
        )
        response.raise_for_status() # Raise an exception for bad status codes (4xx or 5xx)

        data = response.json()
        logger.info(f"Serper API response received (keys: {data.keys()})")

        # Extract relevant job listings (adapt based on Serper's actual output structure)
        job_results = []
        # Check 'jobs' key first, as it's common in job search results
        if 'jobs' in data and isinstance(data['jobs'], list):
             for item in data['jobs']:
                 job_results.append({
                     'title': item.get('title', 'N/A'),
                     'company': item.get('company_name', item.get('source', 'Unknown Company')), # Try different fields
                     'description': item.get('description', item.get('snippet', 'No description provided.')),
                     'link': item.get('link', '#'),
                     'location': item.get('location', location), # Use provided location if not in result
                     'date_posted': item.get('detected_extensions', {}).get('posted_at', 'N/A') # Example nested field
                 })
        # Fallback to organic results if 'jobs' key is not present or empty
        elif 'organic' in data and not job_results:
            logger.info("Parsing 'organic' results for jobs.")
            for item in data['organic']:
                 # Heuristic check if it looks like a job listing
                title = item.get('title', '')
                snippet = item.get('snippet', '')
                if any(keyword in title.lower() for keyword in ['job', 'career', 'hiring', 'position', 'vacancy']) or \
                   any(keyword in snippet.lower() for keyword in ['apply', 'responsibilities', 'qualifications']):
                    job_results.append({
                        'title': title,
                        'company': item.get('source', extract_company_from_title(title)), # Use source or extract
                        'description': snippet,
                        'link': item.get('link', '#'),
                        'location': location, # Serper organic results might not specify location clearly
                        'date_posted': 'Recent' # Often not available in organic results
                    })

        if not job_results:
             logger.warning(f"No job results extracted from Serper response for query '{query}' in '{location}'.")
             return json.dumps({"message": "No job opportunities found for your query.", "results": []})

        logger.info(f"Extracted {len(job_results)} job results.")
        # Return results as a JSON string for the AI
        return json.dumps({"message": f"Found {len(job_results)} potential job opportunities.", "results": job_results})

    except requests.exceptions.RequestException as e:
        logger.error(f"Error calling Serper API: {e}")
        return json.dumps({"error": f"Could not connect to job search service: {e}"})
    except Exception as e:
        logger.error(f"Exception in get_job_opportunities tool: {e}")
        return json.dumps({"error": f"An unexpected error occurred during job search: {e}"})

def extract_company_from_title(title):
    """Simple helper to guess company name from job title string."""
    # Improved heuristic
    delimiters = [' at ', ' - ', ' | ', ' hiring ', ' for ']
    for delim in delimiters:
        if delim in title:
            parts = title.split(delim)
            # Take the part after the delimiter, unless it looks like a job title itself
            potential_company = parts[-1].strip()
            if len(potential_company) > 1 and not any(kw in potential_company.lower() for kw in ['developer', 'manager', 'engineer', 'analyst']):
                 return potential_company
    # If no delimiter found or extraction failed, return default
    return "Unknown Company"


# --- Implement other tool functions ---
def generate_document_template(document_type: str, career_field: str = "", experience_level: str = "") -> str:
    """Generates a basic markdown template for the specified document type."""
    logger.info(f"Executing tool: generate_document_template(document_type='{document_type}', career_field='{career_field}', experience_level='{experience_level}')")
    # This function *could* call the AI again for a more detailed template,
    # but for simplicity, we'll return a predefined basic structure here.
    # A real implementation would likely use the AI.
    template = f"## Basic Template: {document_type}\n\n"
    template += f"**Target Field:** {career_field or 'Not specified'}\n"
    template += f"**Experience Level:** {experience_level or 'Not specified'}\n\n"

    if "resume" in document_type.lower():
        template += (
            "### Contact Information\n"
            "- Name:\n- Phone:\n- Email:\n- LinkedIn:\n- Portfolio (Optional):\n\n"
            "### Summary/Objective\n"
            "- [Write 2-3 sentences summarizing your key skills and career goals relevant to the target field/job]\n\n"
            "### Experience\n"
            "- **Company Name** | Location | Job Title | Start Date - End Date\n"
            "  - [Quantifiable achievement 1 using action verbs]\n"
            "  - [Quantifiable achievement 2 using action verbs]\n\n"
            "### Education\n"
            "- University Name | Degree | Graduation Date\n\n"
            "### Skills\n"
            "- Technical Skills: [List relevant software, tools, languages]\n"
            "- Soft Skills: [List relevant interpersonal skills]\n"
        )
    elif "cover letter" in document_type.lower():
        template += (
            "[Your Name]\n[Your Address]\n[Your Phone]\n[Your Email]\n\n"
            "[Date]\n\n"
            "[Hiring Manager Name (if known), or Title]\n[Company Name]\n[Company Address]\n\n"
            "Dear [Mr./Ms./Mx. Hiring Manager Last Name or Hiring Team],\n\n"
            "**Introduction:** [State the position you're applying for and where you saw it. Briefly mention your key qualification or enthusiasm.]\n\n"
            "**Body Paragraph(s):** [Connect your skills and experience directly to the job requirements. Provide specific examples. Explain why you are interested in this company and role.]\n\n"
            "**Conclusion:** [Reiterate your interest and key qualification. State your call to action (e.g., looking forward to discussing). Thank the reader.]\n\n"
            "Sincerely,\n[Your Name]"
        )
    else:
        template += "[Structure for this document type needs to be defined.]"

    return json.dumps({"template_markdown": template}) # Return as JSON string

def create_personalized_routine(emotion: str, goal: str, available_time_minutes: int = 60, routine_length_days: int = 7) -> str:
    """Creates a basic personalized routine structure."""
    logger.info(f"Executing tool: create_personalized_routine(emotion='{emotion}', goal='{goal}', time={available_time_minutes}, days={routine_length_days})")
    # Similar to template generation, this could call the AI for a detailed plan.
    # Here, we generate a basic fallback structure.
    # A real implementation should use the AI for better personalization.
    routine = generate_basic_routine(emotion, goal, available_time_minutes, routine_length_days) # Use the existing fallback
    logger.info(f"Generated basic routine: {routine['name']}")
    # Add routine to user profile
    # user_profile = add_routine_to_user(session_user_id, routine) # Need user_id here! Pass it if possible.
    # For now, just return the routine structure. The main chat logic should handle saving it.
    return json.dumps(routine) # Return JSON string
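
# The routine generated above is not persisted here because this tool does not receive the
# user_id (see the comments). A small hedged helper the chat layer could call right after
# dispatching this tool is sketched below; it is hypothetical and not part of the original flow.
def persist_routine_result(user_id: str, routine_json: str) -> bool:
    """Parse a create_personalized_routine result and store it on the user's profile (sketch)."""
    try:
        add_routine_to_user(user_id, json.loads(routine_json))
        return True
    except (json.JSONDecodeError, TypeError) as e:
        logger.warning(f"Could not persist generated routine for user {user_id}: {e}")
        return False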

def analyze_resume(resume_text: str, career_goal: str) -> str:
    """Provides a basic analysis structure for the resume."""
    logger.info(f"Executing tool: analyze_resume(career_goal='{career_goal}', resume_length={len(resume_text)})")
    # This should ideally call the AI for actual analysis.
    # Returning a placeholder structure for now.
    analysis = {
        "strengths": ["Identified strength 1 based on AI analysis (placeholder).", "Identified strength 2 (placeholder)."],
        "areas_for_improvement": ["Suggestion 1 for improvement (placeholder).", "Suggestion 2 based on goal alignment (placeholder)."],
        "format_feedback": "General feedback on format (placeholder).",
        "content_feedback": f"Feedback on content relevance to '{career_goal}' (placeholder).",
        "next_steps": ["Recommended action 1 (placeholder).", "Recommended action 2 (placeholder)."]
    }
    # Save the resume text (need user_id)
    # save_user_resume(session_user_id, resume_text) # Pass user_id if available
    return json.dumps({"analysis": analysis}) # Return JSON string

def analyze_portfolio(portfolio_description: str, career_goal: str, portfolio_url: str = "") -> str:
    """Provides a basic analysis structure for the portfolio."""
    logger.info(f"Executing tool: analyze_portfolio(career_goal='{career_goal}', url='{portfolio_url}', desc_length={len(portfolio_description)})")
    # Placeholder analysis
    analysis = {
        "alignment_with_goal": f"Assessment of alignment with '{career_goal}' (placeholder).",
        "strengths": ["Portfolio strength 1 (placeholder).", "Portfolio strength 2 (placeholder)."],
        "areas_for_improvement": ["Suggestion 1 for portfolio enhancement (placeholder)."],
        "presentation_feedback": "Feedback on presentation/UX (placeholder).",
        "next_steps": ["Recommended action for portfolio (placeholder)."]
    }
    # Save portfolio info (need user_id)
    # save_user_portfolio(session_user_id, portfolio_url, portfolio_description) # Pass user_id if available
    return json.dumps({"analysis": analysis}) # Return JSON string


def extract_and_rate_skills_from_resume(resume_text: str, max_skills: int = 8) -> str:
    """
    Placeholder function to simulate skill extraction and rating.
    In a real scenario, this would involve more sophisticated NLP or another AI call.
    """
    logger.info(f"Executing tool: extract_and_rate_skills_from_resume(resume_length={len(resume_text)}, max_skills={max_skills})")

    # Simple keyword spotting for demonstration
    possible_skills = ["Python", "Java", "Project Management", "Communication", "Data Analysis", "Teamwork", "Leadership", "SQL", "React", "Customer Service", "Problem Solving", "Microsoft Office"]
    found_skills = []
    resume_lower = resume_text.lower()
    for skill in possible_skills:
        if skill.lower() in resume_lower:
            # Assign a random score for demonstration
            found_skills.append({"name": skill, "score": random.randint(4, 9)})
        if len(found_skills) >= max_skills:
            break

    # Ensure we return *some* skills if none automatically found
    if not found_skills:
        found_skills = [
            {"name": "Communication", "score": random.randint(5,8)},
            {"name": "Teamwork", "score": random.randint(5,8)},
             {"name": "Problem Solving", "score": random.randint(5,8)},
        ]


    logger.info(f"Extracted skills (placeholder): {[s['name'] for s in found_skills]}")
    return json.dumps({"skills": found_skills[:max_skills]}) # Return JSON string


# --- AI Interaction Logic (Using OpenAI) ---

def get_ai_response(user_id: str, user_input: str, generate_recommendations: bool = True) -> str:
    """
    Gets a response from the OpenAI API, handling context, system prompt, and tool calls.
    """
    logger.info(f"Getting AI response for user {user_id}. Input: '{user_input[:100]}...'")
    if not client:
        return "I apologize, the AI service is currently unavailable. Please check the configuration."

    try:
        user_profile = get_user_profile(user_id)

        # --- System Prompt ---
        system_prompt = f"""
        You are Aishura, an emotionally intelligent AI career assistant. Your primary goal is to provide empathetic,
        realistic, and actionable career guidance. Always follow these steps:
        1.  Acknowledge the user's message and, if applicable, their expressed emotion (from their profile: '{user_profile.get('current_emotion', 'Not specified')}' or message). Use empathetic language.
        2.  Directly address the user's query or statement.
        3.  Proactively offer relevant support using your tools: suggest searching for jobs (`get_job_opportunities`), generating document templates (`generate_document_template`), creating a personalized routine (`create_personalized_routine`), analyzing their resume (`analyze_resume`) or portfolio (`analyze_portfolio`) if they've provided them or mention doing so.
        4.  Tailor your response based on the user's profile:
            - Name: {user_profile.get('name', 'User')}
            - Location: {user_profile.get('location', 'Not specified')}
            - Stated Career Goal: {user_profile.get('career_goal', 'Not specified')}
            - Recent Emotion: {user_profile.get('current_emotion', 'Not specified')}
        5.  If the user has uploaded a resume or portfolio (check profile paths: resume='{user_profile.get('resume_path', '')}', portfolio='{user_profile.get('portfolio_path', '')}'), mention you can analyze them or use insights from previous analysis if available.
        6.  Keep responses concise, friendly, and focused on next steps. Avoid overly long paragraphs.
        7.  Use markdown for formatting (bolding, lists) where appropriate.
        """

        # --- Build Message History ---
        messages = [{"role": "system", "content": system_prompt}]

        # Add recent chat history (must already be in OpenAI message format)
        chat_history = user_profile.get('chat_history', [])
        for msg in chat_history:
            # Replay only plain user/assistant turns. Stored 'tool' results are skipped here,
            # because the API rejects tool messages that are not immediately preceded by the
            # assistant message containing the matching tool_calls.
            if msg.get('role') in ['user', 'assistant'] and msg.get('content'):
                messages.append({"role": msg['role'], "content": msg['content']})


        # Add current user input
        messages.append({"role": "user", "content": user_input})

        # --- Initial API Call ---
        logger.info(f"Sending {len(messages)} messages to OpenAI model {MODEL_ID}.")
        response = client.chat.completions.create(
            model=MODEL_ID,
            messages=messages,
            tools=tools_list,
            tool_choice="auto", # Let the model decide whether to use tools
            temperature=0.7,
            max_tokens=1024 # Adjust as needed
        )

        response_message = response.choices[0].message
        logger.info("Received initial response from OpenAI.")

        # --- Tool Call Handling ---
        tool_calls = response_message.tool_calls
        if tool_calls:
            logger.info(f"AI requested {len(tool_calls)} tool call(s): {[tc.function.name for tc in tool_calls]}")
            # Append the assistant's response message that contains the tool calls
            messages.append(response_message)

            # --- Execute Tools and Get Results ---
            available_functions = {
                "get_job_opportunities": get_job_opportunities,
                "generate_document_template": generate_document_template,
                "create_personalized_routine": create_personalized_routine,
                "analyze_resume": analyze_resume,
                "analyze_portfolio": analyze_portfolio,
                "extract_and_rate_skills_from_resume": extract_and_rate_skills_from_resume,
            }

            for tool_call in tool_calls:
                function_name = tool_call.function.name
                function_to_call = available_functions.get(function_name)
                function_args = json.loads(tool_call.function.arguments) # Arguments are provided as a JSON string

                if function_to_call:
                    try:
                        # Special handling for functions needing user_id or profile info
                        if function_name in ["analyze_resume", "analyze_portfolio", "create_personalized_routine"]:
                            # Add user_id or necessary profile elements to args if needed by the function,
                            # e.g. function_args['user_id'] = user_id
                            # Pass career goal from profile if not in direct args for analysis functions
                            if function_name == "analyze_resume" and 'career_goal' not in function_args:
                                function_args['career_goal'] = user_profile.get('career_goal', 'Not specified')
                            if function_name == "analyze_portfolio" and 'career_goal' not in function_args:
                                function_args['career_goal'] = user_profile.get('career_goal', 'Not specified')

                            # Save files when analysis tools are called
                            if function_name == "analyze_resume":
                                save_user_resume(user_id, function_args.get('resume_text', ''))
                            if function_name == "analyze_portfolio":
                                save_user_portfolio(user_id, function_args.get('portfolio_url', ''), function_args.get('portfolio_description', ''))


                        # Call the function with unpacked arguments
                        logger.info(f"Calling function '{function_name}' with args: {function_args}")
                        function_response = function_to_call(**function_args)
                        logger.info(f"Function '{function_name}' returned (type: {type(function_response)}): {str(function_response)[:200]}...")

                        # Append tool response to messages
                        messages.append(
                            {
                                "tool_call_id": tool_call.id,
                                "role": "tool",
                                "name": function_name,
                                "content": function_response, # Must be a string (JSON string in our case)
                            }
                        )
                         # Also add tool call result to chat history DB
                        add_chat_message(user_id, "tool", {
                             "tool_call_id": tool_call.id,
                             "name": function_name,
                             "content": function_response # Save the JSON string result
                        })


                    except Exception as e:
                        logger.error(f"Error executing function {function_name}: {e}")
                        messages.append(
                            {
                                "tool_call_id": tool_call.id,
                                "role": "tool",
                                "name": function_name,
                                "content": json.dumps({"error": f"Failed to execute tool {function_name}: {e}"}),
                            }
                        )
                        # Also add error to chat history DB
                        add_chat_message(user_id, "tool", {
                             "tool_call_id": tool_call.id,
                             "name": function_name,
                             "content": json.dumps({"error": f"Failed to execute tool {function_name}: {e}"})
                        })


                else:
                    logger.warning(f"Function {function_name} requested by AI but not found.")
                    # Append a message indicating the function wasn't found
                    messages.append(
                        {
                             "tool_call_id": tool_call.id,
                             "role": "tool",
                             "name": function_name,
                             "content": json.dumps({"error": f"Tool '{function_name}' is not available."})
                        }
                    )
                    add_chat_message(user_id, "tool", {
                         "tool_call_id": tool_call.id,
                         "name": function_name,
                         "content": json.dumps({"error": f"Tool '{function_name}' is not available."})
                    })


            # --- Second API Call (after tool execution) ---
            logger.info(f"Sending {len(messages)} messages to OpenAI (including tool results).")
            second_response = client.chat.completions.create(
                model=MODEL_ID,
                messages=messages,
                temperature=0.7,
                max_tokens=1024
                # No tool_choice here, we expect a natural language response
            )
            final_response_content = second_response.choices[0].message.content
            logger.info("Received final response from OpenAI after tool calls.")

        else:
            # No tool calls were made, use the first response
            final_response_content = response_message.content
            logger.info("No tool calls requested by AI.")


        # --- Post-processing and Saving ---
        if not final_response_content:
             final_response_content = "I received that, but I don't have a specific response right now. Could you try rephrasing?"
             logger.warning("AI returned empty content.")


        # Save the user message and the final AI response to the DB.
        # (Intermediate tool results were already saved above as 'tool' messages.)
        add_chat_message(user_id, "user", user_input)
        add_chat_message(user_id, "assistant", final_response_content)


        # Generate recommendations (consider doing this asynchronously)
        if generate_recommendations:
            # This could be a separate AI call based on the final interaction
            # For simplicity, we'll skip detailed recommendation generation here
            # but you would call a function like `gen_recommendations_openai`
            # gen_recommendations_openai(user_id, user_input, final_response_content)
            pass # Placeholder for recommendation generation logic

        return final_response_content

    except openai.APIError as e:
        logger.error(f"OpenAI API returned an API Error: {e}")
        return f"I'm sorry, there was an error communicating with the AI service (API Error: {e.status_code}). Please try again later."
    except openai.APIConnectionError as e:
        logger.error(f"Failed to connect to OpenAI API: {e}")
        return "I'm sorry, I couldn't connect to the AI service. Please check your connection and try again."
    except openai.RateLimitError as e:
        logger.error(f"OpenAI API request exceeded rate limit: {e}")
        return "I'm currently experiencing high demand. Please try again in a few moments."
    except Exception as e:
        # Log the full traceback for debugging
        logger.exception(f"Unexpected error in get_ai_response for user {user_id}: {e}")
        return "I apologize, but an unexpected error occurred while processing your request. Please try again."


# --- Recommendation Generation (Placeholder - Adapt for OpenAI) ---
def gen_recommendations_openai(user_id, user_input, ai_response):
    """Generate recommendations using OpenAI (Adapt prompt and parsing)."""
    logger.info(f"Generating recommendations for user {user_id}")
    if not client:
        logger.warning("OpenAI client not available for generating recommendations.")
        return []

    try:
        user_profile = get_user_profile(user_id)

        prompt = f"""
        Based on the following user profile and recent conversation, generate 1-3 specific, actionable recommendations
        for the user's next steps in their career journey. Focus on practical actions they can take soon.

        User Profile:
        - Current emotion: {user_profile.get('current_emotion', 'Not specified')}
        - Career goal: {user_profile.get('career_goal', 'Not specified')}
        - Location: {user_profile.get('location', 'Not specified')}
        - Recent chat history is available to the main assistant.

        Most Recent Interaction:
        User: {user_input}
        Aishura (AI Assistant): {ai_response}

        Generate recommendations in this JSON format only:
        ```json
        [
          {{
            "title": "Concise recommendation title (e.g., 'Refine Resume Keywords')",
            "description": "Detailed explanation of the recommendation and why it's relevant (2-3 sentences).",
            "action_type": "job_search | skill_building | networking | resume_update | portfolio_review | interview_prep | mindset_shift | other",
            "priority": "high | medium | low"
          }}
        ]
        ```
        Return only valid JSON with no introductory text: either the array above, or an object with a single "recommendations" key containing that array.
        """

        response = client.chat.completions.create(
            model=MODEL_ID, # Or a faster/cheaper model if preferred for this task
            messages=[
                {"role": "system", "content": "You are an expert career advisor generating concise, actionable recommendations in JSON format."},
                {"role": "user", "content": prompt}
            ],
            temperature=0.5,
            max_tokens=512,
            response_format={"type": "json_object"} # Request JSON output if model supports it
        )

        recommendation_json_str = response.choices[0].message.content
        logger.info(f"Raw recommendations JSON string: {recommendation_json_str}")


        # Attempt to parse the JSON
        try:
            # The response_format parameter should ensure it's valid JSON, but double-check
            # Clean potential markdown fences if response_format didn't work
            if recommendation_json_str.startswith("```json"):
                recommendation_json_str = recommendation_json_str.split("```json")[1].split("```")[0].strip()

            # The prompt asks for a list, but response_format might enforce an object. Adjust parsing.
            recommendations_data = json.loads(recommendation_json_str)

            # If the root is an object with a key like "recommendations", extract the list
            if isinstance(recommendations_data, dict) and "recommendations" in recommendations_data and isinstance(recommendations_data["recommendations"], list):
                 recommendations = recommendations_data["recommendations"]
            elif isinstance(recommendations_data, list):
                 recommendations = recommendations_data # It's already a list
            else:
                 logger.error(f"Unexpected JSON structure for recommendations: {type(recommendations_data)}")
                 return []


            # Add valid recommendations to user profile
            valid_recs_added = 0
            for rec in recommendations:
                 # Basic validation of recommendation structure
                 if isinstance(rec, dict) and all(k in rec for k in ['title', 'description', 'action_type', 'priority']):
                     add_recommendation_to_user(user_id, rec)
                     valid_recs_added += 1
                 else:
                      logger.warning(f"Skipping invalid recommendation format: {rec}")

            logger.info(f"Added {valid_recs_added} recommendations for user {user_id}")
            return recommendations # Return the raw list parsed

        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse JSON recommendations from AI response: {e}\nResponse: {recommendation_json_str}")
            return []
        except Exception as e:
             logger.exception(f"Error processing recommendations: {e}")
             return []

    except Exception as e:
        logger.exception(f"Error in gen_recommendations_openai: {e}")
        return []

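# For reference, the two payload shapes the parser above accepts (illustrative values only):
#   {"recommendations": [{"title": "Refine Resume Keywords",
#                         "description": "Tailor keywords to the target role.",
#                         "action_type": "resume_update", "priority": "high"}]}
# or the same objects as a bare JSON array.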

# --- Chart and Visualization Functions (Unchanged, but depend on data format) ---
# [Keep create_emotion_chart, create_progress_chart, create_routine_completion_gauge]
# Ensure they handle the data structures saved by the updated functions correctly.

def create_emotion_chart(user_id):
    """Create a chart of user's emotions over time"""
    user_profile = get_user_profile(user_id)
    emotion_records = user_profile.get('daily_emotions', [])

    if not emotion_records:
        fig = go.Figure()
        fig.add_annotation(text="No emotion data tracked yet.", align='center', showarrow=False)
        fig.update_layout(title="Emotion Tracking")
        return fig

    emotion_values = {
        "Unmotivated": 1, "Anxious": 2, "Confused": 3,
        "Discouraged": 4, "Overwhelmed": 5, "Excited": 6
    }
    # Dates are stored as ISO-format strings; fromisoformat also accepts the older
    # "%Y-%m-%d %H:%M:%S" form, so no separate legacy branch is needed.
    dates = [datetime.fromisoformat(record['date']) for record in emotion_records]
    emotion_scores = [emotion_values.get(record['emotion'], 3) for record in emotion_records]
    emotion_names = [record['emotion'] for record in emotion_records]

    df = pd.DataFrame({'Date': dates, 'Emotion Score': emotion_scores, 'Emotion': emotion_names})
    df = df.sort_values('Date') # Ensure chronological order

    fig = px.line(df, x='Date', y='Emotion Score', markers=True,
                  labels={"Emotion Score": "Emotional State"},
                  title="Your Emotional Journey")
    fig.update_traces(hovertemplate='%{x|%Y-%m-%d %H:%M}<br>Feeling: %{text}', text=df['Emotion'])
    fig.update_yaxes(tickvals=list(emotion_values.values()), ticktext=list(emotion_values.keys()))
    return fig

def create_progress_chart(user_id):
    """Create a chart showing user's progress points over time"""
    user_profile = get_user_profile(user_id)
    tasks = user_profile.get('completed_tasks', [])

    if not tasks:
        fig = go.Figure()
        fig.add_annotation(text="No tasks completed yet.", align='center', showarrow=False)
        fig.update_layout(title="Progress Tracking")
        return fig

    # Ensure tasks have points (might need adjustment based on how points are awarded)
    points_per_task = 20 # Example: Assign fixed points if not stored with task
    dates = []
    cumulative_points = 0
    points_timeline = []
    task_labels = []

    # Sort tasks chronologically (ISO date strings; fromisoformat also handles the
    # older "%Y-%m-%d %H:%M:%S" form)
    tasks.sort(key=lambda x: datetime.fromisoformat(x['date']))

    for task in tasks:
        task_date = datetime.fromisoformat(task['date'])
        dates.append(task_date)
        # Use the points stored with the task when available; otherwise fall back to
        # the fixed per-task estimate above.
        cumulative_points += task.get('points', points_per_task)
        points_timeline.append(cumulative_points)
        task_labels.append(task['task'])


    df = pd.DataFrame({'Date': dates, 'Points': points_timeline, 'Task': task_labels})

    fig = px.line(df, x='Date', y='Points', markers=True, title="Your Career Journey Progress")
    fig.update_traces(hovertemplate='%{x|%Y-%m-%d %H:%M}<br>Points: %{y}<br>Completed: %{text}', text=df['Task'])
    return fig

def create_routine_completion_gauge(user_id):
    """Create a gauge chart showing routine completion percentage"""
    user_profile = get_user_profile(user_id)
    routines = user_profile.get('routine_history', [])

    if not routines:
        fig = go.Figure(go.Indicator(mode="gauge", value=0, title={'text': "Routine Completion"}))
        fig.add_annotation(text="No active routine.", showarrow=False)
        return fig

    # Get the most recent routine (assuming prepend logic)
    latest_routine = routines[0]
    completion = latest_routine.get('completion', 0)
    routine_name = latest_routine.get('routine', {}).get('name', 'Current Routine')

    fig = go.Figure(go.Indicator(
        mode = "gauge+number",
        value = completion,
        domain = {'x': [0, 1], 'y': [0, 1]},
        title = {'text': f"{routine_name} Completion"},
        gauge = {
            'axis': {'range': [0, 100], 'tickwidth': 1, 'tickcolor': "darkblue"},
            'bar': {'color': "cornflowerblue"},
            'bgcolor': "white",
            'borderwidth': 2,
            'bordercolor': "gray",
            'steps': [
                {'range': [0, 50], 'color': 'whitesmoke'},
                {'range': [50, 80], 'color': 'lightgray'}],
            'threshold': {
                'line': {'color': "green", 'width': 4},
                'thickness': 0.75, 'value': 90}})) # Threshold at 90%
    return fig


def create_skill_radar_chart(user_id):
    """
    Creates a radar chart of user's skills.
    Requires skills data, potentially extracted by `extract_and_rate_skills_from_resume` tool.
    """
    logger.info(f"Creating skill radar chart for user {user_id}")
    user_profile = get_user_profile(user_id)
    resume_path = user_profile.get('resume_path')

    if not resume_path or not os.path.exists(resume_path):
        logger.warning("No resume path found or file missing for skill chart.")
        fig = go.Figure()
        fig.add_annotation(text="Upload & Analyze Resume for Skill Chart", showarrow=False)
        fig.update_layout(title="Skill Assessment")
        return fig

    try:
        with open(resume_path, 'r', encoding='utf-8') as f:
            resume_text = f.read()

        # Call the skill-extraction tool directly on the resume text. In a real app this
        # analysis would more likely be triggered explicitly and its results stored,
        # rather than re-run (an extra AI call) every time the chart is rendered.
        skills_json_str = extract_and_rate_skills_from_resume(resume_text=resume_text)
        skill_data = json.loads(skills_json_str)

        if 'skills' in skill_data and skill_data['skills']:
            skills = skill_data['skills']
            # Limit to max 8 skills for readability
            skills = skills[:8]

            categories = [skill['name'] for skill in skills]
            values = [skill['score'] for skill in skills]

            # Ensure the loop closes
            if len(categories) > 2:
                 categories.append(categories[0])
                 values.append(values[0])

            fig = go.Figure()
            fig.add_trace(go.Scatterpolar(
                r=values,
                theta=categories,
                fill='toself',
                name='Skills'
            ))
            fig.update_layout(
                polar=dict(radialaxis=dict(visible=True, range=[0, 10])),
                showlegend=False,
                title="Skill Assessment (Based on Resume)"
            )
            logger.info(f"Successfully created radar chart with {len(skills)} skills.")
            return fig
        else:
            logger.warning("Could not extract skills from resume for chart.")
            fig = go.Figure()
            fig.add_annotation(text="Could not extract skills from resume", showarrow=False)
            fig.update_layout(title="Skill Assessment")
            return fig

    except Exception as e:
        logger.exception(f"Error creating skill radar chart: {e}")
        fig = go.Figure()
        fig.add_annotation(text="Error analyzing skills", showarrow=False)
        fig.update_layout(title="Skill Assessment")
        return fig

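# A possible optimization (sketch only, not implemented): persist the extracted skills after
# the first analysis, e.g. update_user_profile(user_id, {"skill_scores": skill_data["skills"]}),
# where "skill_scores" is a hypothetical profile key, and have create_skill_radar_chart()
# read the cached scores instead of re-running the extraction tool on every render.
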
# --- Gradio Interface Components ---
def create_interface():
    """Create the Gradio interface for Aishura"""

    # Generate a unique user ID for this session (can be replaced with login later)
    # This state needs careful handling in Gradio for multi-user scenarios.
    # Using a simple global or closure for demo purposes.
    # A better approach involves Gradio's State management or user handling.
    session_user_id = str(uuid.uuid4())
    logger.info(f"Initializing Gradio interface for session user ID: {session_user_id}")
    # Initialize profile for session user
    get_user_profile(session_user_id)
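    # Sketch (assumption, not wired up here): truly per-session IDs via Gradio state rather
    # than this closure-level variable shared by every visitor to the same process, e.g.
    #     user_id_state = gr.State()                                    # declared inside gr.Blocks below
    #     app.load(lambda: str(uuid.uuid4()), outputs=user_id_state)    # fresh ID per session
    # with each handler taking user_id_state as an extra input.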


    # --- Event Handlers for Gradio Components ---

    def welcome(name, location, emotion, goal):
        """Handles the initial welcome screen submission."""
        logger.info(f"Welcome action for user {session_user_id}: name='{name}', loc='{location}', emo='{emotion}', goal='{goal}'")
        if not all([name, location, emotion, goal]):
            return ("Please fill out all fields to get started.",  # validation message
                    gr.update(),               # leave chatbot unchanged
                    gr.update(visible=True),   # keep welcome visible
                    gr.update(visible=False),  # keep main hidden
                    gr.update(), gr.update(), gr.update(), gr.update())  # leave charts unchanged

        # Update profile
        update_user_profile(session_user_id, {
            "name": name, "location": location, "career_goal": goal
        })
        add_emotion_record(session_user_id, emotion) # Record initial emotion

        # Generate initial AI message based on input
        initial_input = f"Hi Aishura! I'm {name} from {location}. I'm currently feeling {emotion}, and my main goal is to {goal}. Can you help me get started?"
        ai_response = get_ai_response(session_user_id, initial_input, generate_recommendations=True)

        # Initial chat history
        initial_chat = [(initial_input, ai_response)]

        # Initial charts
        emotion_fig = create_emotion_chart(session_user_id)
        progress_fig = create_progress_chart(session_user_id)
        routine_fig = create_routine_completion_gauge(session_user_id)
        skill_fig = create_skill_radar_chart(session_user_id) # Will be empty initially

        # Output: clear validation message, hide welcome, show main, populate chat and charts
        return ("",                            # clear welcome validation message
                gr.update(value=initial_chat), # update chatbot
                gr.update(visible=False),      # hide welcome group
                gr.update(visible=True),       # show main interface
                gr.update(value=emotion_fig),
                gr.update(value=progress_fig),
                gr.update(value=routine_fig),
                gr.update(value=skill_fig)
                )


    def chat_submit(message, history):
        """Handles sending a message in the chatbot."""
        logger.info(f"Chat submit for user {session_user_id}: '{message[:50]}...'")
        if not message:
            return history, "", gr.update()  # Nothing to do for an empty message

        ai_response = get_ai_response(session_user_id, message, generate_recommendations=True)
        history.append((message, ai_response))

        # Update recommendations display after chat
        recommendations_md = display_recommendations(session_user_id)

        return history, "", gr.update(value=recommendations_md) # Return updated history, clear input, update recs


    # --- Simulation for Emotion Messages ---
    pause_message = "Take your time, we’re here when you're ready."
    retype_message = "It doesn’t have to be perfect. Let’s just begin."

    # JS for basic simulation (might need refinement based on Gradio version/behavior)
    # This is illustrative; direct JS injection can be tricky/fragile in Gradio.
    # We'll use Gradio events for a simpler simulation.

    def show_pause_message():
        # Simulate showing pause message (e.g., make a Markdown visible)
        # In a real app, this needs proper timing logic (JS setTimeout)
        # logger.info("Simulating 'pause' message visibility.")
        return gr.update(value=pause_message, visible=True)

    def show_retype_message():
        # Simulate showing retype message
        # logger.info("Simulating 'retype' message visibility.")
        return gr.update(value=retype_message, visible=True)

    def hide_emotion_message():
         # logger.info("Hiding emotion message.")
         return gr.update(value="", visible=False)


    def handle_chat_focus():
        """Called when chat input gains focus."""
        # logger.info("Chat input focused.")
        # Decide whether to show a message, e.g., maybe the retype one briefly?
        # Or just hide any existing message.
        return hide_emotion_message() # Hide message on focus for now


    # Placeholder: More complex logic would be needed for actual pause/retype detection
    # Using .change() with debounce might approximate it, but Gradio support varies.
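    # One simple approximation (assumption: the installed Gradio version exposes these events):
    #     msg_textbox.change(fn=show_retype_message, outputs=emotion_message_area)
    #     msg_textbox.submit(fn=hide_emotion_message, outputs=emotion_message_area)
    # True "pause" detection (no typing for N seconds) would still need client-side JS timers.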

    # --- Tool Interface Handlers ---

    def search_jobs_interface_handler(query, location, max_results):
        """Handles the Job Search button click."""
        logger.info(f"Manual Job Search UI: query='{query}', loc='{location}', num={max_results}")
        # Call the underlying tool function directly for the UI button
        results_json_str = get_job_opportunities(query, location, int(max_results))
        try:
            results_data = json.loads(results_json_str)
            if "error" in results_data:
                return f"Error: {results_data['error']}"
            if not results_data.get("results"):
                return "No job opportunities found matching your criteria."

            output_md = f"## Job Opportunities Found ({len(results_data['results'])})\n\n"
            for i, job in enumerate(results_data['results'], 1):
                output_md += f"### {i}. {job.get('title', 'N/A')}\n"
                output_md += f"**Company:** {job.get('company', 'N/A')}\n"
                output_md += f"**Location:** {job.get('location', location)}\n" # Use search location as fallback
                output_md += f"**Description:** {job.get('description', 'N/A')}\n"
                output_md += f"**Posted:** {job.get('date_posted', 'N/A')}\n"
                link = job.get('link', '#')
                output_md += f"**Link:** [{link}]({link})\n\n"
            return output_md
        except json.JSONDecodeError:
            logger.error(f"Failed to parse job search results: {results_json_str}")
            return "Error displaying job search results."
        except Exception as e:
             logger.exception("Error in search_jobs_interface_handler")
             return f"An unexpected error occurred: {e}"


    def generate_template_interface_handler(doc_type, career_field, experience):
        """Handles Generate Template button click."""
        logger.info(f"Manual Template UI: type='{doc_type}', field='{career_field}', exp='{experience}'")
        template_json_str = generate_document_template(doc_type, career_field, experience)
        try:
            template_data = json.loads(template_json_str)
            if "error" in template_data:
                 return f"Error: {template_data['error']}"
            return template_data.get('template_markdown', "Could not generate template.")
        except json.JSONDecodeError:
             logger.error(f"Failed to parse template results: {template_json_str}")
             return "Error displaying template."
        except Exception as e:
             logger.exception("Error in generate_template_interface_handler")
             return f"An unexpected error occurred: {e}"


    def create_routine_interface_handler(emotion, goal, time_available, days):
        """Handles Create Routine button click."""
        logger.info(f"Manual Routine UI: emo='{emotion}', goal='{goal}', time='{time_available}', days='{days}'")
        routine_json_str = create_personalized_routine(emotion, goal, int(time_available), int(days))
        try:
            routine_data = json.loads(routine_json_str)
            if "error" in routine_data:
                 return f"Error: {routine_data['error']}"

            # Save the generated routine to the user profile
            add_routine_to_user(session_user_id, routine_data)

            # Format for display
            output_md = f"# Your {routine_data.get('name', 'Personalized Routine')}\n\n"
            output_md += f"{routine_data.get('description', '')}\n\n"
            for day_plan in routine_data.get('daily_tasks', []):
                output_md += f"## Day {day_plan.get('day', '?')}\n"
                if not day_plan.get('tasks'):
                     output_md += "- Rest day or free choice.\n"
                else:
                     for task in day_plan.get('tasks', []):
                         output_md += f"- **{task.get('name', 'Task')}** "
                         output_md += f"({task.get('duration', '?')} mins"
                         if 'points' in task: # Only show points if available
                              output_md += f", {task.get('points', '?')} points"
                         output_md += ")\n"
                         output_md += f"  *Why: {task.get('description', '...') }*\n"
                output_md += "\n"

            # Update the gauge chart as well
            gauge_fig = create_routine_completion_gauge(session_user_id)

            return output_md, gr.update(value=gauge_fig) # Return markdown and updated gauge

        except json.JSONDecodeError:
             logger.error(f"Failed to parse routine results: {routine_json_str}")
             return "Error displaying routine.", gr.update() # Return update for gauge too
        except Exception as e:
             logger.exception("Error in create_routine_interface_handler")
             return f"An unexpected error occurred: {e}", gr.update()


    def analyze_resume_interface_handler(resume_text):
        """Handles Analyze Resume button click."""
        logger.info(f"Manual Resume Analysis UI: length={len(resume_text)}")
        if not resume_text:
            # Clear previous results if input is empty
            return "Please paste your resume text above.", gr.update(figure=None)

        user_profile = get_user_profile(session_user_id)
        career_goal = user_profile.get('career_goal', 'Not specified') # Get goal from profile

        # Save resume first
        save_user_resume(session_user_id, resume_text)

        # Call analysis tool (placeholder version for now)
        analysis_json_str = analyze_resume(resume_text, career_goal)

        try:
            analysis_data = json.loads(analysis_json_str)
            if "error" in analysis_data:
                 return f"Error: {analysis_data['error']}", gr.update() # Update for chart

            # Format analysis for display (adapt based on actual tool output)
            analysis = analysis_data.get('analysis', {})
            output_md = "## Resume Analysis Results\n\n"
            output_md += f"**Analysis against goal:** '{career_goal}'\n\n"
            output_md += "**Strengths:**\n" + "\n".join([f"- {s}" for s in analysis.get('strengths', [])]) + "\n\n"
            output_md += "**Areas for Improvement:**\n" + "\n".join([f"- {s}" for s in analysis.get('areas_for_improvement', [])]) + "\n\n"
            output_md += f"**Format Feedback:** {analysis.get('format_feedback', 'N/A')}\n\n"
            output_md += f"**Content Feedback:** {analysis.get('content_feedback', 'N/A')}\n\n"
            output_md += "**Suggested Next Steps:**\n" + "\n".join([f"- {s}" for s in analysis.get('next_steps', [])])

            # Update skill chart after analysis
            skill_fig = create_skill_radar_chart(session_user_id)

            return output_md, gr.update(value=skill_fig)

        except json.JSONDecodeError:
             logger.error(f"Failed to parse resume analysis results: {analysis_json_str}")
             return "Error displaying resume analysis.", gr.update()
        except Exception as e:
             logger.exception("Error in analyze_resume_interface_handler")
             return f"An unexpected error occurred: {e}", gr.update()


    def analyze_portfolio_interface_handler(portfolio_url, portfolio_description):
        """Handles Analyze Portfolio button click."""
        logger.info(f"Manual Portfolio Analysis UI: url='{portfolio_url}', desc_len={len(portfolio_description)}")
        if not portfolio_description:
            return "Please provide a description of your portfolio."

        user_profile = get_user_profile(session_user_id)
        career_goal = user_profile.get('career_goal', 'Not specified') # Get goal from profile

         # Save portfolio info first
        save_user_portfolio(session_user_id, portfolio_url, portfolio_description)

        # Call analysis tool (placeholder)
        analysis_json_str = analyze_portfolio(portfolio_description, career_goal, portfolio_url)

        try:
            analysis_data = json.loads(analysis_json_str)
            if "error" in analysis_data:
                 return f"Error: {analysis_data['error']}"

            # Format analysis for display
            analysis = analysis_data.get('analysis', {})
            output_md = "## Portfolio Analysis Results\n\n"
            output_md += f"**Analysis against goal:** '{career_goal}'\n"
            if portfolio_url:
                output_md += f"**Portfolio URL:** {portfolio_url}\n\n"
            output_md += f"**Alignment with Goal:**\n{analysis.get('alignment_with_goal', 'N/A')}\n\n"
            output_md += "**Strengths:**\n" + "\n".join([f"- {s}" for s in analysis.get('strengths', [])]) + "\n\n"
            output_md += "**Areas for Improvement:**\n" + "\n".join([f"- {s}" for s in analysis.get('areas_for_improvement', [])]) + "\n\n"
            output_md += f"**Presentation Feedback:** {analysis.get('presentation_feedback', 'N/A')}\n\n"
            output_md += "**Suggested Next Steps:**\n" + "\n".join([f"- {s}" for s in analysis.get('next_steps', [])])

            return output_md

        except json.JSONDecodeError:
             logger.error(f"Failed to parse portfolio analysis results: {analysis_json_str}")
             return "Error displaying portfolio analysis."
        except Exception as e:
             logger.exception("Error in analyze_portfolio_interface_handler")
             return f"An unexpected error occurred: {e}"

    # --- Progress Tracking Handlers ---

    def complete_task_handler(task_name):
        """Handles marking a task as complete."""
        logger.info(f"Complete Task UI: task='{task_name}' for user {session_user_id}")
        if not task_name:
            return ("Please enter the name of the task you completed.", "",
                    gr.update(), gr.update(), gr.update()) # No chart updates if no task

        # Add task and update points
        add_task_to_user(session_user_id, task_name)
        points_earned = 20  # Fixed award per task; could be taken from task data if stored

        # Update completion % of latest routine
        db = load_user_database()
        if session_user_id in db['users'] and db['users'][session_user_id].get('routine_history'):
            latest_routine_entry = db['users'][session_user_id]['routine_history'][0] # Get latest
            # Simple: increment completion by a fixed amount per task (e.g., 5-15%)
            # More complex: calculate based on routine definition and completed tasks
            increment = random.randint(5, 15)
            new_completion = min(100, latest_routine_entry.get('completion', 0) + increment)
            latest_routine_entry['completion'] = new_completion
            save_user_database(db) # Save updated DB
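        # A stricter alternative (sketch, not implemented): derive completion from the routine
        # definition itself, e.g. completed_tasks / total_tasks * 100 with total_tasks counted
        # from latest_routine_entry['routine']['daily_tasks']; the random increment above is a
        # simple stand-in.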

        # Refresh charts
        emotion_fig = create_emotion_chart(session_user_id)
        progress_fig = create_progress_chart(session_user_id)
        gauge_fig = create_routine_completion_gauge(session_user_id)

        return (f"Great job completing '{task_name}'! You've earned progress points.",
                "", # Clear task input
                gr.update(figure=emotion_fig),
                gr.update(figure=progress_fig),
                gr.update(figure=gauge_fig))


    def update_emotion_handler(emotion):
        """Handles updating the user's current emotion."""
        logger.info(f"Update Emotion UI: emotion='{emotion}' for user {session_user_id}")
        if not emotion:
            return "Please select an emotion.", gr.update() # No chart update

        add_emotion_record(session_user_id, emotion)

        # Refresh emotion chart
        emotion_fig = create_emotion_chart(session_user_id)

        return f"Your current emotion has been updated to '{emotion}'.", gr.update(figure=emotion_fig)


    def display_recommendations(current_user_id):
        """Fetches and formats recommendations for display."""
        logger.info(f"Displaying recommendations for user {current_user_id}")
        user_profile = get_user_profile(current_user_id)
        recommendations = user_profile.get('recommendations', [])

        if not recommendations:
            return "No recommendations available yet. Chat with Aishura to get personalized suggestions!"

        # Show the most recent 5 recommendations (they are prepended)
        recent_recs = recommendations[:5]

        output_md = "# Your Latest Recommendations\n\n"
        if not recent_recs:
             output_md += "No recommendations yet."
             return output_md

        for i, rec_entry in enumerate(recent_recs, 1):
            rec = rec_entry.get('recommendation', {}) # Get the actual recommendation object
            output_md += f"### {i}. {rec.get('title', 'Recommendation')}\n"
            output_md += f"{rec.get('description', 'No details.')}\n"
            output_md += f"**Priority:** {rec.get('priority', 'N/A').title()} | "
            output_md += f"**Type:** {rec.get('action_type', 'N/A').replace('_', ' ').title()}\n"
            # output_md += f"*Generated: {rec_entry.get('date', 'N/A')}*\n" # Optional: show date
            output_md += "---\n"

        return output_md

    # --- Build Gradio Interface ---
    with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue", secondary_hue="sky")) as app:
        gr.Markdown("# Aishura - Your AI Career Assistant")

        # --- Welcome Screen ---
        with gr.Group(visible=True) as welcome_group:
            gr.Markdown("## Welcome to Aishura!")
            gr.Markdown("Let's get acquainted. Tell me a bit about yourself.")
            with gr.Row():
                with gr.Column():
                    name_input = gr.Textbox(label="Your Name", placeholder="e.g., Alex Chen")
                    location_input = gr.Textbox(label="Your Location", placeholder="e.g., London, UK")
                with gr.Column():
                    emotion_dropdown = gr.Dropdown(choices=EMOTIONS, label="How are you feeling today?")
                    goal_dropdown = gr.Dropdown(choices=GOAL_TYPES, label="What's your main career goal?")
            welcome_button = gr.Button("Start My Journey")
            welcome_output = gr.Markdown() # For validation messages

        # --- Main App Interface (Initially Hidden) ---
        with gr.Group(visible=False) as main_interface:
            with gr.Tabs() as tabs:

                # --- Chat Tab ---
                with gr.TabItem("πŸ’¬ Chat"):
                    with gr.Row():
                        with gr.Column(scale=3):
                            chatbot = gr.Chatbot(
                                label="Aishura Assistant",
                                height=550,
                                avatar_images=("./user_avatar.png", "./aishura_avatar.png"), # Provide paths to avatar images if available
                                bubble_full_width=False,
                                show_copy_button=True
                                )
                            # --- Simulated Emotion Message Area ---
                            emotion_message_area = gr.Markdown("", visible=False, elem_classes="subtle-message") # Hidden initially
                            # --- Chat Input ---
                            msg_textbox = gr.Textbox(
                                show_label=False,
                                placeholder="Type your message here and press Enter...",
                                container=False,
                                scale=1 # Take full width below chatbot
                            )
                        with gr.Column(scale=1):
                            gr.Markdown("### ✨ Recommendations")
                            recommendation_output = gr.Markdown(value="Chat with Aishura to get recommendations.")
                            refresh_recs_button = gr.Button("πŸ”„ Refresh Recommendations")


                # --- Analysis Tab ---
                with gr.TabItem("πŸ“Š Analysis"):
                     with gr.Tabs() as analysis_subtabs:
                        with gr.TabItem("πŸ“„ Resume"):
                            gr.Markdown("### Resume Analysis")
                            gr.Markdown("Paste your full resume below. Aishura can analyze it against your career goals and help identify strengths and areas for improvement.")
                            resume_text_input = gr.Textbox(label="Paste Resume Text Here", lines=15, placeholder="Your resume content...")
                            analyze_resume_button = gr.Button("Analyze My Resume")
                            resume_analysis_output = gr.Markdown()
                        with gr.TabItem("🎨 Portfolio"):
                            gr.Markdown("### Portfolio Analysis")
                            gr.Markdown("Provide a link and/or description of your portfolio (e.g., website, GitHub, Behance).")
                            portfolio_url_input = gr.Textbox(label="Portfolio URL (Optional)", placeholder="[https://your-portfolio.com](https://your-portfolio.com)")
                            portfolio_desc_input = gr.Textbox(label="Portfolio Description", lines=5, placeholder="Describe your portfolio's purpose, key projects, and target audience...")
                            analyze_portfolio_button = gr.Button("Analyze My Portfolio")
                            portfolio_analysis_output = gr.Markdown()
                        with gr.TabItem("πŸ’‘ Skills"):
                             gr.Markdown("### Skill Assessment")
                             gr.Markdown("This chart visualizes skills identified from your latest resume analysis.")
                             skill_radar_chart_output = gr.Plot(label="Skill Radar Chart")


                # --- Tools Tab ---
                with gr.TabItem("πŸ› οΈ Tools"):
                     with gr.Tabs() as tools_subtabs:
                        with gr.TabItem("πŸ” Job Search"):
                             gr.Markdown("### Find Job Opportunities")
                             gr.Markdown("Use this tool to search for jobs based on keywords and location.")
                             job_query_input = gr.Textbox(label="Job Title/Keyword", placeholder="e.g., Software Engineer, Marketing Manager")
                             job_location_input = gr.Textbox(label="Location", placeholder="e.g., New York, Remote")
                             job_results_slider = gr.Slider(minimum=5, maximum=20, value=10, step=1, label="Number of Results")
                             search_jobs_button = gr.Button("Search for Jobs")
                             job_search_output = gr.Markdown()
                        with gr.TabItem("πŸ“ Templates"):
                             gr.Markdown("### Generate Document Templates")
                             gr.Markdown("Get started with common career documents.")
                             doc_type_dropdown = gr.Dropdown(choices=["Resume", "Cover Letter", "LinkedIn Summary", "Networking Email"], label="Select Document Type")
                             doc_field_input = gr.Textbox(label="Career Field (Optional)", placeholder="e.g., Healthcare, Technology")
                             doc_exp_dropdown = gr.Dropdown(choices=["Entry-Level", "Mid-Career", "Senior-Level", "Student/Intern"], label="Experience Level")
                             generate_template_button = gr.Button("Generate Template")
                             template_output_md = gr.Markdown()
                        with gr.TabItem("πŸ“… Routine"):
                             gr.Markdown("### Create a Personalized Routine")
                             gr.Markdown("Develop a daily or weekly plan to work towards your goals, tailored to how you feel.")
                             routine_emotion_dropdown = gr.Dropdown(choices=EMOTIONS, label="How are you feeling about this goal?")
                             routine_goal_input = gr.Textbox(label="Specific Goal for this Routine", placeholder="e.g., Apply to 5 jobs, Learn basic Python")
                             routine_time_slider = gr.Slider(minimum=15, maximum=120, value=45, step=15, label="Minutes Available Per Day")
                             routine_days_slider = gr.Slider(minimum=3, maximum=21, value=7, step=1, label="Routine Length (Days)")
                             create_routine_button = gr.Button("Create My Routine")
                             routine_output_md = gr.Markdown()


                # --- Progress Tab ---
                with gr.TabItem("πŸ“ˆ Progress"):
                    gr.Markdown("## Track Your Journey")
                    with gr.Row():
                        with gr.Column(scale=1):
                            gr.Markdown("### Mark Task Complete")
                            task_input = gr.Textbox(label="Task Name", placeholder="e.g., Updated LinkedIn Profile")
                            complete_button = gr.Button("Complete Task")
                            task_output = gr.Markdown()
                            gr.Markdown("---")
                            gr.Markdown("### Update Emotion")
                            new_emotion_dropdown = gr.Dropdown(choices=EMOTIONS, label="How are you feeling now?")
                            emotion_button = gr.Button("Update Feeling")
                            emotion_output = gr.Markdown()
                        with gr.Column(scale=2):
                             gr.Markdown("### Visualizations")
                             with gr.Row():
                                 emotion_chart_output = gr.Plot(label="Emotional Journey")
                                 progress_chart_output = gr.Plot(label="Progress Points")
                             with gr.Row():
                                 routine_gauge_output = gr.Plot(label="Routine Completion")
                                 # Maybe add skill chart here too? Or keep in Analysis.
                                 gr.Markdown("") # Spacer


        # --- Event Wiring ---

        # Welcome screen action
        welcome_button.click(
            fn=welcome,
            inputs=[name_input, location_input, emotion_dropdown, goal_dropdown],
            outputs=[welcome_output,                          # validation / status message
                     chatbot, welcome_group, main_interface,  # populate chat, show/hide groups
                     emotion_chart_output, progress_chart_output,
                     routine_gauge_output, skill_radar_chart_output]  # populate initial charts
        )

        # Chat submission
        msg_textbox.submit(
            fn=chat_submit,
            inputs=[msg_textbox, chatbot],
            outputs=[chatbot, msg_textbox, recommendation_output] # Update chatbot, clear input, refresh recs
        )

        # Recommendation refresh button
        refresh_recs_button.click(
            fn=lambda: display_recommendations(session_user_id), # Use lambda to pass user_id
            inputs=[],
            outputs=[recommendation_output]
        )

        # --- Simulated Emotion Message Wiring ---
        # Simple simulation: Show/hide message on focus/blur (or change)
        # msg_textbox.focus(fn=handle_chat_focus, outputs=[emotion_message_area])
        # msg_textbox.blur(fn=hide_emotion_message, outputs=[emotion_message_area])
        # Example: Show retype message briefly on change, then hide
        # msg_textbox.change(fn=show_retype_message, outputs=emotion_message_area).then(
        #      fn=hide_emotion_message, outputs=emotion_message_area, js="() => { return new Promise(resolve => setTimeout(() => { resolve('') }, 2000)) }")


        # Analysis Tab Wiring
        analyze_resume_button.click(
            fn=analyze_resume_interface_handler,
            inputs=[resume_text_input],
            outputs=[resume_analysis_output, skill_radar_chart_output] # Update analysis text and skill chart
        )
        analyze_portfolio_button.click(
            fn=analyze_portfolio_interface_handler,
            inputs=[portfolio_url_input, portfolio_desc_input],
            outputs=[portfolio_analysis_output]
        )

         # Tools Tab Wiring
        search_jobs_button.click(
             fn=search_jobs_interface_handler,
             inputs=[job_query_input, job_location_input, job_results_slider],
             outputs=[job_search_output]
        )
        generate_template_button.click(
             fn=generate_template_interface_handler,
             inputs=[doc_type_dropdown, doc_field_input, doc_exp_dropdown],
             outputs=[template_output_md]
        )
        create_routine_button.click(
             fn=create_routine_interface_handler,
             inputs=[routine_emotion_dropdown, routine_goal_input, routine_time_slider, routine_days_slider],
             outputs=[routine_output_md, routine_gauge_output] # Update routine text and gauge chart
        )

         # Progress Tab Wiring
        complete_button.click(
            fn=complete_task_handler,
            inputs=[task_input],
            outputs=[task_output, task_input, # Update message, clear input
                     emotion_chart_output, progress_chart_output, routine_gauge_output] # Update all charts
        )
        emotion_button.click(
            fn=update_emotion_handler,
            inputs=[new_emotion_dropdown],
            outputs=[emotion_output, emotion_chart_output] # Update message and emotion chart
        )

        # Load initial state for elements that need it (e.g., charts if resuming session)
        # app.load(...) could be used here if state management was more robust.


    return app

# --- Main Execution ---
if __name__ == "__main__":
    if not OPENAI_API_KEY or not SERPER_API_KEY:
        print("*****************************************************")
        print("Warning: API keys for OpenAI or Serper not found.")
        print("Please set OPENAI_API_KEY and SERPER_API_KEY environment variables.")
        print("You can create a .env file in the same directory:")
        print("OPENAI_API_KEY=your_openai_key")
        print("SERPER_API_KEY=your_serper_key")
        print("*****************************************************")
        # Decide whether to exit or continue with limited functionality
        # exit(1)

    logger.info("Starting Aishura Gradio application...")
    aishura_app = create_interface()
    # Consider adding share=False for local testing, share=True for public link
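    # If multiple users may hit this app concurrently, enabling Gradio's request queue
    # (aishura_app.queue() before launch) helps keep long-running AI calls from blocking others.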
    aishura_app.launch(share=False)
    logger.info("Aishura Gradio application stopped.")