import gradio as gr
import faiss
import numpy as np
import os
import json
import datetime
import uuid
import asyncio
import time
from typing import Dict, List, Optional, Tuple, Any
from urllib.parse import urlparse
from urllib.robotparser import RobotFileParser

import pandas as pd
import ollama
from duckduckgo_search import DDGS
import requests

# crawl4ai setup
try:
    from crawl4ai import AsyncWebCrawler, BrowserConfig, CacheMode, CrawlerRunConfig
    from crawl4ai.content_filter_strategy import BM25ContentFilter
    from crawl4ai.markdown_generation_strategy import DefaultMarkdownGenerator
    CRAWL4AI_AVAILABLE = True
except ImportError:
    print("WARNING: crawl4ai library not found or failed to import. Resource finding will be disabled.")
    print("Install it: pip install 'crawl4ai[playwright]' and run 'playwright install --with-deps'")
    CRAWL4AI_AVAILABLE = False


# --- Configuration ---
OLLAMA_MODEL = "llama3:8b" # Or your preferred model
FAISS_INDEX_FILE = "faiss_index.index"
FAISS_METADATA_FILE = "faiss_metadata.json"
USER_DATA_DIR = "user_data"
COMMUNITY_FILE = "community_posts.json"
os.makedirs(USER_DATA_DIR, exist_ok=True)

# FAISS Vector Dimension (Must match Ollama embedding model)
# nomic-embed-text: 768
VECTOR_DIMENSION = 768
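# Note: the embedding model has to be available locally first (e.g. `ollama pull nomic-embed-text`);
# if a different embedding model is used, VECTOR_DIMENSION must be changed to match its output size.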


# --- System Prompts (Same as before) ---
EMOTION_ANALYSIS_PROMPT = """...""" # Keep as is
GROWTH_PLAN_PROMPT = """...""" # Keep as is
RESOURCE_SYNTHESIS_PROMPT = """...""" # Keep as is (adjust if needed for FAISS context)
COMMUNITY_SUGGESTION_PROMPT = """...""" # Keep as is
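# Note: the display helpers further down assume these prompts ask the model for JSON with specific
# keys, e.g. the emotion analysis is expected to include "primary_emotion", "intensity", "triggers",
# "patterns", "growth_opportunities" and "action_steps" (see format_analysis), and the growth plan
# "short_term_actions", "medium_term_practices", "long_term_changes", "reflection_prompts" and
# "success_metrics" (see format_plan).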

# --- Data Persistence Functions ---

def load_user_data(username: str) -> Dict:
    """Loads data for a specific user from a JSON file."""
    if not username: return {"entries": [], "plans": {}, "resources": {}, "profile": {}}
    filepath = os.path.join(USER_DATA_DIR, f"{username}.json")
    if os.path.exists(filepath):
        try:
            with open(filepath, 'r') as f:
                return json.load(f)
        except json.JSONDecodeError:
            print(f"Warning: Corrupted data file for user {username}. Starting fresh.")
            return {"entries": [], "plans": {}, "resources": {}, "profile": {}} # Return default on error
    else:
        # Create initial structure for new user
        return {"entries": [], "plans": {}, "resources": {}, "profile": {"username": username, "joined": datetime.datetime.now().isoformat(), "points": 0, "goals":{}}}

def save_user_data(username: str, data: Dict):
    """Saves data for a specific user to a JSON file."""
    if not username: return
    filepath = os.path.join(USER_DATA_DIR, f"{username}.json")
    with open(filepath, 'w') as f:
        json.dump(data, f, indent=4)

def load_community_posts() -> List[Dict]:
    """Loads community posts from a JSON file."""
    if os.path.exists(COMMUNITY_FILE):
        try:
            with open(COMMUNITY_FILE, 'r') as f:
                return json.load(f)
        except json.JSONDecodeError:
             print("Warning: Community posts file corrupted. Starting fresh.")
             return []
    else:
        return []

def save_community_posts(posts: List[Dict]):
    """Saves community posts to a JSON file."""
    with open(COMMUNITY_FILE, 'w') as f:
        json.dump(posts, f, indent=4)

# --- FAISS and Embedding Functions ---

def get_ollama_embeddings(texts: List[str], model_name: str = "nomic-embed-text:latest") -> Tuple[List[List[float]], bool]:
    """Gets embeddings from Ollama. Returns embeddings and a success flag."""
    ollama_api_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434") + "/api/embeddings"
    embeddings = []
    all_successful = True
    max_retries = 2
    retry_delay = 1

    for text in texts:
        if not text or not isinstance(text, str):
            print(f"Warning: Skipping embedding for invalid input: {text}")
            embeddings.append([])
            all_successful = False
            continue

        current_embedding = []
        for attempt in range(max_retries):
            try:
                response = requests.post(ollama_api_url, json={"model": model_name, "prompt": text}, headers={"Content-Type": "application/json"})
                response.raise_for_status()
                result = response.json()
                if "embedding" in result and len(result["embedding"]) == VECTOR_DIMENSION:
                    current_embedding = result["embedding"]
                    break # Success for this text
                else:
                    print(f"Warning: Ollama response issue (attempt {attempt+1}) for text: {text[:50]}... Response: {result}")
                    if attempt == max_retries - 1: all_successful = False
            except Exception as e:
                print(f"Error getting Ollama embedding (Attempt {attempt+1}/{max_retries}): {e}")
                if attempt < max_retries - 1: time.sleep(retry_delay)
                else: all_successful = False

        embeddings.append(current_embedding) # Append embedding or empty list if failed

    # Replace empty lists with zero vectors
    final_embeddings = []
    for emb in embeddings:
         if emb:
             final_embeddings.append(emb)
         else:
             print("Warning: Replacing failed embedding with zero vector.")
             final_embeddings.append([0.0] * VECTOR_DIMENSION)
             all_successful = False # Mark overall success as False if any failed

    return final_embeddings, all_successful
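
# Note: depending on the installed client version, the ollama Python package also exposes an
# embeddings helper (e.g. ollama.embeddings(model=..., prompt=...)) that could replace the raw
# HTTP call above; the explicit requests call is kept here so retries and the OLLAMA_BASE_URL
# override stay visible.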

def create_or_load_faiss_index() -> Tuple[Optional[faiss.Index], Dict[int, Dict]]:
    """Loads FAISS index and metadata, or creates empty ones."""
    index = None
    metadata = {}
    if os.path.exists(FAISS_INDEX_FILE) and os.path.exists(FAISS_METADATA_FILE):
        try:
            print(f"Loading FAISS index from {FAISS_INDEX_FILE}")
            index = faiss.read_index(FAISS_INDEX_FILE)
            print(f"Loading FAISS metadata from {FAISS_METADATA_FILE}")
            with open(FAISS_METADATA_FILE, 'r') as f:
                # Load metadata, converting string keys back to int
                metadata_str_keys = json.load(f)
                metadata = {int(k): v for k, v in metadata_str_keys.items()}
            print(f"Loaded index with {index.ntotal} vectors and {len(metadata)} metadata entries.")
            # Consistency check (optional but recommended)
            if index.ntotal != len(metadata):
                print(f"WARNING: FAISS index size ({index.ntotal}) != metadata size ({len(metadata)}). Rebuilding might be needed.")
                # Decide recovery strategy: clear both, try to align, etc.
                # Simplest: clear both and start over if inconsistent
                # index = None
                # metadata = {}
        except Exception as e:
            print(f"Error loading FAISS data: {e}. Starting fresh.")
            index = None
            metadata = {}

    if index is None:
        print("Creating new FAISS index.")
        # Using IndexFlatL2, simple L2 distance. IndexIVFFlat is faster for large datasets but needs training.
        index = faiss.IndexFlatL2(VECTOR_DIMENSION)
        metadata = {}

    return index, metadata
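
# Note: IndexFlatL2 assigns sequential integer IDs (0, 1, 2, ...) in insertion order, so the
# metadata dict simply maps those IDs to the stored text/url; add_to_faiss below relies on this
# when it computes new IDs as start_index + i.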

def save_faiss_index(index: faiss.Index, metadata: Dict[int, Dict]):
    """Saves FAISS index and metadata."""
    try:
        print(f"Saving FAISS index to {FAISS_INDEX_FILE} ({index.ntotal} vectors)")
        faiss.write_index(index, FAISS_INDEX_FILE)
        print(f"Saving FAISS metadata to {FAISS_METADATA_FILE} ({len(metadata)} entries)")
        with open(FAISS_METADATA_FILE, 'w') as f:
            # Store metadata with string keys for JSON compatibility
            json.dump({str(k): v for k, v in metadata.items()}, f, indent=4)
        print("FAISS data saved successfully.")
    except Exception as e:
        print(f"Error saving FAISS data: {e}")
        gr.Warning(f"Failed to save resource index: {e}")


def add_to_faiss(index: faiss.Index, metadata: Dict[int, Dict], content_list: List[Dict]) -> Tuple[faiss.Index, Dict[int, Dict], int]:
    """Adds crawled content to FAISS index and metadata."""
    # Filter once so texts and URLs stay index-aligned (content_list may contain items without markdown)
    items_with_text = [item for item in content_list if item.get('markdown')]
    texts_to_embed = [item['markdown'] for item in items_with_text]
    urls = [item.get('url', 'Unknown URL') for item in items_with_text]  # Track URLs, aligned with texts_to_embed

    if not texts_to_embed:
        print("No text content provided to add_to_faiss.")
        return index, metadata, 0

    print(f"Generating embeddings for {len(texts_to_embed)} chunks...")
    embeddings, success = get_ollama_embeddings(texts_to_embed)
    if not success:
        gr.Warning("Some embeddings failed to generate. Results might be incomplete.")

    # Skip zero vectors (used as placeholders for failed embeddings) so they are not indexed
    valid_mask = [bool(emb) and any(emb) for emb in embeddings]
    valid_embeddings = np.array([emb for emb, ok in zip(embeddings, valid_mask) if ok], dtype='float32')

    if valid_embeddings.shape[0] == 0:
        print("No valid embeddings generated.")
        return index, metadata, 0

    # Add vectors to FAISS index
    start_index = index.ntotal
    index.add(valid_embeddings)
    print(f"Added {valid_embeddings.shape[0]} vectors to FAISS index. New total: {index.ntotal}")

    # Add corresponding metadata
    added_count = 0
    original_indices_added = [i for i, ok in enumerate(valid_mask) if ok] # Indices in texts_to_embed whose embeddings were actually added

    for i, original_idx in enumerate(original_indices_added):
        faiss_id = start_index + i
        metadata[faiss_id] = {
            "text": texts_to_embed[original_idx],
            "url": urls[original_idx],
            # Add other relevant info like title if available from crawler
        }
        added_count += 1

    print(f"Added metadata for {added_count} entries.")
    return index, metadata, added_count

def search_faiss(index: faiss.Index, metadata: Dict[int, Dict], query_text: str, k: int = 5) -> List[Dict]:
    """Searches FAISS index and returns relevant metadata entries."""
    if not query_text or index.ntotal == 0:
        return []

    print(f"Generating embedding for query: {query_text[:50]}...")
    query_embedding, success = get_ollama_embeddings([query_text])

    if not success or not query_embedding[0]:
        gr.Error("Failed to generate embedding for search query.")
        return []

    query_vector = np.array(query_embedding, dtype='float32')

    print(f"Searching FAISS index (k={k})...")
    try:
        # D: distances, I: indices (IDs)
        distances, indices = index.search(query_vector, k)
        results = []
        if indices.size > 0:
            for i, faiss_id in enumerate(indices[0]):  # indices is a 2D array [[id1, id2, ...]]
                if faiss_id != -1:  # -1 indicates no neighbor found
                    entry = metadata.get(int(faiss_id))
                    if entry:
                        entry_with_score = entry.copy()
                        # L2 distance, lower is better. Can convert to a similarity score if needed.
                        entry_with_score['score'] = float(distances[0][i])
                        results.append(entry_with_score)
                    else:
                        print(f"Warning: FAISS ID {faiss_id} not found in metadata.")
        print(f"Found {len(results)} results from FAISS.")
        return results
    except Exception as e:
        print(f"Error during FAISS search: {e}")
        gr.Warning(f"FAISS search failed: {e}")  # gr.Error must be raised to display; warn and return empty instead
        return []
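
# Usage sketch (hypothetical query): search_faiss(index, metadata, "managing workplace anxiety", k=3)
# returns up to 3 dicts of the form {"text": ..., "url": ..., "score": <L2 distance, lower is better>}.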

# --- LLM Interaction Functions ---
def call_ollama_chat(system_prompt: str, user_prompt: str) -> Dict:
    messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
    try:
        response = ollama.chat(model=OLLAMA_MODEL, messages=messages)
        response_content = response['message']['content']
        try:
            # Clean potential markdown code block fences
            if response_content.startswith("```json"): response_content = response_content[7:]
            if response_content.endswith("```"): response_content = response_content[:-3]
            parsed_json = json.loads(response_content.strip())
            return parsed_json
        except json.JSONDecodeError:
            print(f"LLM response for '{system_prompt[:30]}...' was not valid JSON.")
            return {"raw_response": response_content}
    except Exception as e:
        print(f"Error calling Ollama: {e}")
        return {"error": str(e)}

# Specific LLM tasks
def analyze_emotion(journal_entry: str) -> Dict:
    return call_ollama_chat(EMOTION_ANALYSIS_PROMPT, journal_entry)

def generate_growth_plan(emotion_analysis: Dict, user_goals: Dict) -> Dict:
    input_data = {"emotion_analysis": emotion_analysis, "user_goals": user_goals}
    return call_ollama_chat(GROWTH_PLAN_PROMPT, json.dumps(input_data, indent=2))

def synthesize_resources_llm(emotion_analysis: Dict, growth_plan: Optional[Dict], search_results: List[Dict]) -> Dict:
    """Synthesizes resources using LLM based on FAISS search results."""
    if not search_results: return {"error": "No search results provided for synthesis."}
    # Extract text and URLs from search results
    snippets = [f"--- Content from {res.get('url', 'Unknown')} ---\n{res.get('text', '')}" for res in search_results]
    source_urls = list(set([res.get('url', 'Unknown') for res in search_results]))
    combined_content = "\n\n".join(snippets)

    input_data = {
        "emotion_analysis": emotion_analysis,
        "growth_plan": growth_plan if growth_plan else "No specific growth plan available.",
        "web_content_snippets": combined_content,
        "source_urls_provided": source_urls
    }
    synthesis_result = call_ollama_chat(RESOURCE_SYNTHESIS_PROMPT, json.dumps(input_data, indent=2))
    # Add source URLs if LLM didn't
    if isinstance(synthesis_result, dict) and 'source_urls' not in synthesis_result:
         synthesis_result['source_urls'] = source_urls
    return synthesis_result

def get_community_suggestions(emotion_analysis: Dict, growth_plan: Optional[Dict]) -> Dict:
    input_data = {"emotion_analysis": emotion_analysis, "growth_plan": growth_plan}
    return call_ollama_chat(COMMUNITY_SUGGESTION_PROMPT, json.dumps(input_data, indent=2))


# --- Web Search and Crawl Functions ---
def get_web_urls(search_term: str, num_results: int = 3) -> List[str]:
    # ... (Keep implementation from previous simplified version) ...
    allowed_urls = []
    try:
        enhanced_search = f"{search_term} emotional regulation coping strategies therapy techniques"
        print(f"Searching DDG for: {enhanced_search}")
        results = DDGS().text(enhanced_search, max_results=num_results * 2) # Fetch slightly more
        urls = [result["href"] for result in results if result.get("href")]
        # Basic filtering
        filtered_urls = []
        seen_domains = set()
        discard_domains = {"youtube.com", "amazon.com", "pinterest.com", "facebook.com", "instagram.com", "twitter.com", "tiktok.com"}
        for url in urls:
            if url.lower().endswith(".pdf"): continue
            try:
                domain = urlparse(url).netloc.replace("www.", "")
                if domain and domain not in seen_domains and domain not in discard_domains:
                    filtered_urls.append(url)
                    seen_domains.add(domain)
            except Exception: continue
        allowed_urls = check_robots_txt(filtered_urls[:num_results]) # Limit to desired number
        print(f"Allowed URLs: {allowed_urls}")
    except Exception as e: print(f"❌ Failed search: {str(e)}")
    return allowed_urls

def check_robots_txt(urls: List[str]) -> List[str]: # Simplified
    return urls
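
# check_robots_txt above is intentionally a pass-through. A stricter variant, using the
# RobotFileParser imported at the top of this file, might look like the sketch below. It is not
# wired into get_web_urls; the function name and the "allow if robots.txt is unreachable" policy
# are assumptions, not part of the original design.
def check_robots_txt_strict(urls: List[str], user_agent: str = "*") -> List[str]:
    """Sketch: keep only URLs whose robots.txt permits fetching for the given user agent."""
    allowed = []
    for url in urls:
        parsed = urlparse(url)
        robots_url = f"{parsed.scheme}://{parsed.netloc}/robots.txt"
        rp = RobotFileParser()
        rp.set_url(robots_url)
        try:
            rp.read()
            if rp.can_fetch(user_agent, url):
                allowed.append(url)
        except Exception:
            # If robots.txt cannot be fetched, err on the side of keeping the URL.
            allowed.append(url)
    return allowed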

async def crawl_webpages_simple(urls: List[str]) -> List[Dict]:
    """Crawls pages, returns [{'url': url, 'markdown': markdown}]."""
    if not CRAWL4AI_AVAILABLE or not urls: return []
    md_generator = DefaultMarkdownGenerator()
    crawler_config = CrawlerRunConfig(
        markdown_generator=md_generator,
        excluded_tags=["script", "style", "nav", "footer", "aside"],
        only_text=False,
        cache_mode=CacheMode.NORMAL,
        user_agent="Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
        page_timeout=20000,
        wait_for_network_idle=True,
        network_idle_timeout=3000,
    )
    browser_config = BrowserConfig(headless=True, text_mode=False, light_mode=True)
    results_list = []
    print(f"Crawling {len(urls)} URLs...")
    try:
        async with AsyncWebCrawler(config=browser_config) as crawler:
            crawl_results = await crawler.arun_many(urls, config=crawler_config)
            for res in crawl_results:
                 markdown_content = res.markdown_v2.raw_markdown if (res and res.markdown_v2 and res.markdown_v2.raw_markdown) else ""
                 if markdown_content.strip(): results_list.append({'url': res.url, 'markdown': markdown_content.strip()})
    except Exception as e: print(f"Crawling error: {e}")
    print(f"Crawled {len(results_list)} pages successfully.")
    return results_list


# --- Gradio App Logic ---

# Load initial FAISS data
faiss_index, faiss_metadata = create_or_load_faiss_index()
community_posts_global = load_community_posts() # Load community posts once

# Helper to format analysis for display
def format_analysis(analysis):
    if not analysis or "error" in analysis or "raw_response" in analysis:
         return f"Analysis Error or Incomplete:\n```json\n{json.dumps(analysis, indent=2)}\n```"
    md = f"""
**Primary Emotion:** {analysis.get('primary_emotion', 'N/A')} (Intensity: {analysis.get('intensity', 'N/A')}/10)
**Triggers:** {', '.join(analysis.get('triggers', [])) or 'None'}
**Patterns:** {', '.join(analysis.get('patterns', [])) or 'None'}

**Growth Opportunities:**
"""
    for opp in analysis.get('growth_opportunities', []): md += f"- {opp}\n"
    md += "\n**Action Steps:**\n"
    for step in analysis.get('action_steps', []): md += f"- {step}\n"
    return md

# Helper to format plan for display
def format_plan(plan):
     if not plan or "error" in plan or "raw_response" in plan:
          return f"Plan Error or Incomplete:\n```json\n{json.dumps(plan, indent=2)}\n```"
     md = "**Short-term Actions:**\n"
     for item in plan.get('short_term_actions', []): md += f"- {item}\n"
     md += "\n**Medium-term Practices:**\n"
     for item in plan.get('medium_term_practices', []): md += f"- {item}\n"
     md += "\n**Long-term Changes:**\n"
     for item in plan.get('long_term_changes', []): md += f"- {item}\n"
     md += "\n**Reflection Prompts:**\n"
     for item in plan.get('reflection_prompts', []): md += f"- {item}\n"
     md += "\n**Success Metrics:**\n"
     for item in plan.get('success_metrics', []): md += f"- {item}\n"
     return md

# Helper to format synthesis for display
def format_synthesis(synthesis):
    if not synthesis or "error" in synthesis or "raw_response" in synthesis:
        return f"Synthesis Error or Incomplete:\n```json\n{json.dumps(synthesis, indent=2)}\n```"
    md = "**Key Insights:**\n"
    for item in synthesis.get('key_insights', []): md += f"- {item}\n"
    md += "\n**Practical Exercises:**\n"
    for item in synthesis.get('practical_exercises', []): md += f"- {item}\n"
    md += "\n**Recommended Readings:**\n"
    for item in synthesis.get('recommended_readings', []): md += f"- {item}\n"
    md += f"\n**Expert Advice Summary:**\n{synthesis.get('expert_advice', 'N/A')}\n"
    md += "\n**Action Plan:**\n"
    for item in synthesis.get('action_plan', []): md += f"- {item}\n"
    md += "\n**Sources:**\n"
    for item in synthesis.get('source_urls', []): md += f"- {item}\n"
    return md

# Helper to format community posts
def format_community_posts(posts):
    if not posts: return "No community posts yet."
    md = ""
    for post in sorted(posts, key=lambda x: x['timestamp'], reverse=True):
         comments_md = ""
         for c in sorted(post.get('comments', []), key=lambda x: x['timestamp']):
             comments_md += f"  - **{c['user_id']}** ({c['timestamp'][:16]}): {c['comment']}\n"
         md += f"""
### {post['title']}
**By:** {post['user_id']} ({post['timestamp'][:16]}) | **Likes:** {post['likes']}
{post['content']}
**Comments ({len(post.get('comments',[]))}):**
{comments_md or '  (No comments)'}
---
"""
    return md


# --- Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft(), title="EmotionToAction") as demo:
    # --- State Management ---
    # User-specific data loaded from file
    user_data_state = gr.State({})
    # FAISS index and metadata loaded once
    faiss_index_state = gr.State(faiss_index)
    faiss_metadata_state = gr.State(faiss_metadata)
    # Session state for current analysis/plan context
    current_analysis_state = gr.State(None)
    current_plan_state = gr.State(None)
    current_emotion_id_state = gr.State(None) # ID of the entry being viewed/processed
    community_posts_state = gr.State(community_posts_global) # Use global list loaded once

    gr.Markdown("# 🌱 EmotionToAction (Gradio Version)")

    with gr.Row():
         username_input = gr.Textbox(label="Enter Username", placeholder="Type username and press Enter")
         # Output for status messages
         status_output = gr.Markdown("")

    # --- Main Tabs ---
    with gr.Tabs() as tabs:
        # --- Journal Tab ---
        with gr.TabItem("πŸ“ Journal", id=0):
            with gr.Row():
                with gr.Column(scale=2):
                     journal_entry_input = gr.Textbox(label="What are you feeling right now?", lines=10, placeholder="Describe your emotional experience...")
                     analyze_button = gr.Button("Analyze Emotions", variant="primary")
                with gr.Column(scale=1):
                     gr.Markdown("### Past Entries")
                     past_entries_display = gr.DataFrame(headers=["Date", "Emotion", "Entry Snippet", "Entry ID"], interactive=False, height=300)
                     # Load past entries when user changes or button clicked?

        # --- Analysis Tab ---
        with gr.TabItem("🧠 Analysis", id=1):
            gr.Markdown("### AI Emotion Analysis")
            analysis_display = gr.Markdown("Analysis will appear here after submitting a journal entry.")
            with gr.Row():
                 plan_button = gr.Button("πŸ’‘ Create Growth Plan")
                 find_resources_button = gr.Button("πŸ”Ž Find & Add Resources") # Changed label

        # --- Plan Tab ---
        with gr.TabItem("πŸš€ Growth Plan", id=2):
            gr.Markdown("### Your Personalized Growth Plan")
            plan_display = gr.Markdown("Plan will appear here after generation.")

        # --- Resources Tab ---
        with gr.TabItem("πŸ“š Resources", id=3):
             with gr.Accordion("Find New Resources (Adds to Index)", open=False):
                  find_resources_status = gr.Markdown("Trigger resource finding from the 'Analysis' tab.")
             with gr.Accordion("Synthesize Found Resources", open=True):
                  synthesis_query_input = gr.Textbox(label="Describe the topic you want synthesized resources for (e.g., 'managing anxiety triggered by work')", placeholder="Uses info stored in resource index...")
                  synthesize_button = gr.Button("Synthesize Resources", variant="secondary")
                  synthesis_display = gr.Markdown("Synthesized insights will appear here.")
             with gr.Accordion("Search Indexed Resources", open=True):
                  search_query_input = gr.Textbox(label="Search indexed content", placeholder="Enter keywords...")
                  search_button = gr.Button("Search Index", variant="secondary")
                  search_results_display = gr.DataFrame(headers=["Text Snippet", "Source URL", "Score"], interactive=False, height=300)


        # --- Community Tab ---
        with gr.TabItem("πŸ‘₯ Community", id=4):
            gr.Markdown("### Community Hub")
            with gr.Row():
                with gr.Column(scale=2):
                    gr.Markdown("#### Recent Posts")
                    community_feed_display = gr.Markdown("Loading posts...") # Use Markdown for better formatting
                with gr.Column(scale=1):
                     gr.Markdown("#### New Post")
                     post_title_input = gr.Textbox(label="Title")
                     post_content_input = gr.Textbox(label="Content", lines=5)
                     post_button = gr.Button("Submit Post", variant="primary")
                     # Add like/comment inputs here if desired (more complex)

        # --- Profile Tab ---
        with gr.TabItem("πŸ‘€ Profile", id=5):
             gr.Markdown("### Your Profile")
             profile_points_display = gr.Number(label="Growth Points", interactive=False)
             profile_joined_display = gr.Textbox(label="Member Since", interactive=False)
             gr.Markdown("#### Growth Goals")
             profile_goal1_input = gr.Textbox(label="Goal 1")
             profile_goal2_input = gr.Textbox(label="Goal 2")
             save_goals_button = gr.Button("Save Goals")

    # --- Event Handlers ---

    # Load user data when username is entered
    def handle_username_change(username, user_data_s):
        if not username:
            return {"entries": [], "plans": {}, "resources": {}, "profile": {}}, "Please enter a username.", None, None, None, None, None, None, None
        print(f"Loading data for user: {username}")
        user_data = load_user_data(username)
        # Ensure profile exists
        if "profile" not in user_data:
             user_data["profile"] = {"username": username, "joined": datetime.datetime.now().isoformat(), "points": 0, "goals":{}}
        # Format past entries for DataFrame display
        entry_list = user_data.get("entries", [])
        df_data = [
             [e['timestamp'][:10], e['analysis'].get('primary_emotion', 'N/A'), e['journal_entry'][:50]+'...', e['id']]
             for e in sorted(entry_list, key=lambda x:x['timestamp'], reverse=True)
         ]
        past_entries_df = pd.DataFrame(df_data, columns=["Date", "Emotion", "Entry Snippet", "Entry ID"])

        return user_data, f"Loaded data for {username}.", past_entries_df, \
               user_data.get("profile", {}).get("points", 0), \
               user_data.get("profile", {}).get("joined", ""), \
               user_data.get("profile", {}).get("goals", {}).get("goal1", ""), \
               user_data.get("profile", {}).get("goals", {}).get("goal2", "")

    username_input.submit(
        handle_username_change,
        inputs=[username_input, user_data_state],
        outputs=[user_data_state, status_output, past_entries_display,
                 profile_points_display, profile_joined_display,
                 profile_goal1_input, profile_goal2_input]
    )

    # Analyze Button Click
    def handle_analyze(username, user_data_s, journal_entry):
        if not username: return "Please enter username first.", None, None, None, None, None
        if not journal_entry: return "Journal entry cannot be empty.", None, None, None, None, None

        status = "Analyzing emotions..."
        yield status, None, None, None, None, None # Update status immediately

        analysis = analyze_emotion(journal_entry)

        if "error" in analysis:
            status = f"Analysis failed: {analysis['error']}"
            formatted_analysis = f"Error:\n```json\n{json.dumps(analysis, indent=2)}\n```"
            yield status, formatted_analysis, None, None, None, None
        elif "raw_response" in analysis:
             status = "Analysis complete (raw response)."
             formatted_analysis = f"Raw Response:\n```\n{analysis['raw_response']}\n```"
             # Cannot proceed with raw response usually
             yield status, formatted_analysis, None, None, None, None
        else:
            # Save entry and update user data
            entry_id = str(uuid.uuid4())
            new_entry = {
                'id': entry_id,
                'timestamp': datetime.datetime.now().isoformat(),
                'journal_entry': journal_entry,
                'analysis': analysis
            }
            user_data_s["entries"] = user_data_s.get("entries", []) + [new_entry]
            user_data_s["profile"]["points"] = user_data_s.get("profile", {}).get("points", 0) + 10
            save_user_data(username, user_data_s)

            # Update UI
            status = "Analysis complete!"
            formatted_analysis = format_analysis(analysis)
            # Update past entries display immediately
            entry_list = user_data_s.get("entries", [])
            df_data = [[e['timestamp'][:10], e['analysis'].get('primary_emotion', 'N/A'), e['journal_entry'][:50]+'...', e['id']] for e in sorted(entry_list, key=lambda x:x['timestamp'], reverse=True)]
            past_entries_df = pd.DataFrame(df_data, columns=["Date", "Emotion", "Entry Snippet", "Entry ID"])

            # Return updates for status, analysis display, current analysis state, current emotion ID, user data state, and past entries df
            yield status, formatted_analysis, analysis, entry_id, user_data_s, past_entries_df

    analyze_button.click(
        handle_analyze,
        inputs=[username_input, user_data_state, journal_entry_input],
        outputs=[status_output, analysis_display, current_analysis_state, current_emotion_id_state, user_data_state, past_entries_display]
    )

    # Create Plan Button Click
    def handle_create_plan(username, user_data_s, current_analysis, current_emotion_id):
        if not username: return "Please enter username.", None, None
        if not current_analysis: return "Please analyze an entry first.", None, None
        if not current_emotion_id: return "Internal error: Missing emotion ID.", None, None

        status = "Generating growth plan..."
        yield status, None, None # Update status immediately

        user_goals = user_data_s.get("profile", {}).get("goals", {})
        plan = generate_growth_plan(current_analysis, user_goals)

        if "error" in plan or "raw_response" in plan:
            status = "Failed to generate plan."
            formatted_plan = f"Error/Raw:\n```json\n{json.dumps(plan, indent=2)}\n```"
            yield status, formatted_plan, None
        else:
            # Save plan and update points
            user_data_s["plans"] = user_data_s.get("plans", {})
            user_data_s["plans"][current_emotion_id] = plan
            user_data_s["profile"]["points"] = user_data_s.get("profile", {}).get("points", 0) + 20
            save_user_data(username, user_data_s)
            status = "Growth plan generated!"
            formatted_plan = format_plan(plan)
            yield status, formatted_plan, plan # Update status, display, and plan state

    plan_button.click(
         handle_create_plan,
         inputs=[username_input, user_data_state, current_analysis_state, current_emotion_id_state],
         outputs=[status_output, plan_display, current_plan_state]
    )

    # Find & Add Resources Button Click (Async)
    async def handle_find_resources(username, current_analysis, faiss_index_s, faiss_metadata_s, progress=gr.Progress(track_tqdm=True)):
        if not username: return "Please enter username.", faiss_index_s, faiss_metadata_s, "Idle"
        if not current_analysis: return "Please analyze an entry first.", faiss_index_s, faiss_metadata_s, "Idle"
        if not CRAWL4AI_AVAILABLE: return "crawl4ai library not installed.", faiss_index_s, faiss_metadata_s, "Error"

        status_msg = "Starting resource finding..."
        yield status_msg, faiss_index_s, faiss_metadata_s, status_msg # Initial update

        emotion = current_analysis.get('primary_emotion', 'challenge')
        triggers = current_analysis.get('triggers', [])
        search_term = f"{emotion} coping strategies {' '.join(triggers)}"

        progress(0.1, desc="Searching web...")
        status_msg = "Searching web..."
        yield status_msg, faiss_index_s, faiss_metadata_s, status_msg
        urls = get_web_urls(search_term, num_results=3) # Limit URLs
        if not urls:
            yield "No relevant URLs found.", faiss_index_s, faiss_metadata_s, "No URLs found."
            return

        progress(0.3, desc=f"Crawling {len(urls)} pages...")
        status_msg = f"Crawling {len(urls)} pages..."
        yield status_msg, faiss_index_s, faiss_metadata_s, status_msg
        crawled_content = await crawl_webpages_simple(urls) # Async call
        if not crawled_content:
            yield "Crawling failed or yielded no content.", faiss_index_s, faiss_metadata_s, "Crawling failed."
            return

        progress(0.7, desc="Adding content to FAISS index...")
        status_msg = "Adding content to index..."
        yield status_msg, faiss_index_s, faiss_metadata_s, status_msg
        # Note: add_to_faiss modifies the index/metadata objects in place
        index_obj = faiss_index_s # Get current index from state
        meta_obj = faiss_metadata_s # Get current metadata from state
        _, _, added_count = add_to_faiss(index_obj, meta_obj, crawled_content)

        if added_count > 0:
             # IMPORTANT: Save the modified index and metadata back to disk
             save_faiss_index(index_obj, meta_obj)
             status_msg = f"Successfully added {added_count} content chunks to the index."
             yield status_msg, index_obj, meta_obj, status_msg # Return updated state objects
        else:
             status_msg = "Crawled content, but failed to add anything to the index."
             yield status_msg, index_obj, meta_obj, status_msg

    # Gradio supports async generator handlers directly, so the coroutine can be wired up as-is
    find_resources_button.click(
        handle_find_resources,
        inputs=[username_input, current_analysis_state, faiss_index_state, faiss_metadata_state],
        outputs=[status_output, faiss_index_state, faiss_metadata_state, find_resources_status] # Update index/meta state
    )

    # Synthesize Button Click
    def handle_synthesize(username, user_data_s, current_emotion_id, faiss_index_s, faiss_metadata_s, query_override=""):
        if not username: return "Please enter username.", None
        # Prioritize using an emotion context if available, else use the override query
        search_text = ""
        context_analysis = None
        context_plan = None
        if not query_override and current_emotion_id:
            entry = next((e for e in user_data_s.get("entries", []) if e['id'] == current_emotion_id), None)
            if entry and 'analysis' in entry:
                 context_analysis = entry['analysis']
                 context_plan = user_data_s.get("plans", {}).get(current_emotion_id)
                 emotion = context_analysis.get('primary_emotion', 'issue')
                 triggers = context_analysis.get('triggers', [])
                 search_text = f"{emotion} coping techniques {' '.join(triggers)}"
            else: query_override = "general emotional coping strategies" # Fallback if context missing
        elif not query_override:
             query_override = "general emotional coping strategies" # Default if no context

        if query_override: search_text = query_override
        if not search_text:
            yield "Cannot determine search topic.", None
            return

        status = f"Searching index for '{search_text[:30]}...' and synthesizing..."
        yield status, "Synthesizing..." # Update status

        search_results = search_faiss(faiss_index_s, faiss_metadata_s, search_text, k=5)
        if not search_results:
            yield f"No relevant info found in index for '{search_text[:30]}...'", "No results found."
            return

        synthesis = synthesize_resources_llm(context_analysis or {}, context_plan, search_results)

        if "error" in synthesis or "raw_response" in synthesis:
             formatted_synthesis = f"Synthesis Error/Raw:\n```json\n{json.dumps(synthesis, indent=2)}\n```"
             yield "Synthesis failed.", formatted_synthesis
        else:
             # Save synthesis result associated with the emotion ID if context was used
             if context_analysis and current_emotion_id:
                 user_data_s["resources"] = user_data_s.get("resources", {})
                 user_data_s["resources"][current_emotion_id] = synthesis
                 save_user_data(username, user_data_s)
                 status = "Synthesis complete and saved!"
             else:
                 status = "Synthesis complete (not saved to specific entry)."
             formatted_synthesis = format_synthesis(synthesis)
             yield status, formatted_synthesis

    synthesize_button.click(
        handle_synthesize,
        inputs=[username_input, user_data_state, current_emotion_id_state, faiss_index_state, faiss_metadata_state, synthesis_query_input],
        outputs=[status_output, synthesis_display]
    )


    # Search Index Button Click
    def handle_search_index(faiss_index_s, faiss_metadata_s, query):
         if not query: return "Please enter search query.", None
         results = search_faiss(faiss_index_s, faiss_metadata_s, query, k=10)
         if not results: return "No results found.", None
         # Format for DataFrame
         df_data = [[res.get('text', '')[:150]+'...', res.get('url', 'N/A'), f"{res.get('score', 0):.2f}"] for res in results]
         results_df = pd.DataFrame(df_data, columns=["Text Snippet", "Source URL", "Score"])
         return f"Found {len(results)} results.", results_df

    search_button.click(
         handle_search_index,
         inputs=[faiss_index_state, faiss_metadata_state, search_query_input],
         outputs=[status_output, search_results_display]
    )

    # --- Community Handlers ---
    def handle_new_post(username, title, content, community_posts_s):
         if not username: return "Enter username first.", community_posts_s, format_community_posts(community_posts_s)
         if not title or not content: return "Title and content required.", community_posts_s, format_community_posts(community_posts_s)

         new_post = {'id': str(uuid.uuid4()), 'user_id': username, 'timestamp': datetime.datetime.now().isoformat(), 'title': title, 'content': content, 'likes': 0, 'comments': []}
         community_posts_s.append(new_post)
         save_community_posts(community_posts_s) # Save updated list
         return "Post submitted.", community_posts_s, format_community_posts(community_posts_s)

    post_button.click(
        handle_new_post,
        inputs=[username_input, post_title_input, post_content_input, community_posts_state],
        outputs=[status_output, community_posts_state, community_feed_display] # Update state and display
    )

    # Initial load of community posts display
    demo.load(lambda posts: format_community_posts(posts), inputs=community_posts_state, outputs=community_feed_display)


    # --- Profile Handlers ---
    def handle_save_goals(username, user_data_s, goal1, goal2):
         if not username: return "Enter username first.", user_data_s
         user_data_s["profile"] = user_data_s.get("profile", {})
         user_data_s["profile"]["goals"] = {"goal1": goal1, "goal2": goal2}
         save_user_data(username, user_data_s)
         return "Goals saved!", user_data_s

    save_goals_button.click(
        handle_save_goals,
        inputs=[username_input, user_data_state, profile_goal1_input, profile_goal2_input],
        outputs=[status_output, user_data_state] # Update user data state
    )


# Launch the Gradio app
if __name__ == "__main__":
    demo.launch(debug=True)  # set share=True to create a public link if needed