from fastapi import APIRouter, HTTPException
from pydantic import BaseModel, HttpUrl
from typing import Dict, Any, Literal
import logging
import os
from supabase import AsyncClient
from dotenv import load_dotenv

from mediaunmasked.scrapers.article_scraper import ArticleScraper
from mediaunmasked.analyzers.scoring import MediaScorer
from mediaunmasked.utils.logging_config import setup_logging

# Load environment variables
load_dotenv()

# Initialize logging
setup_logging()
logger = logging.getLogger(__name__)

# Initialize router and dependencies
router = APIRouter(tags=["analysis"])
scraper = ArticleScraper()
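
# This router is expected to be mounted by the application entrypoint; the module
# path and prefix below are assumptions for illustration, not defined in this file:
#
#   from fastapi import FastAPI
#   from mediaunmasked.api.analyze import router  # hypothetical import path
#
#   app = FastAPI()
#   app.include_router(router, prefix="/api")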

# Get Supabase credentials
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")

# Initialize Supabase client
if not SUPABASE_URL or not SUPABASE_KEY:
    raise Exception("Supabase credentials not found in environment variables")

supabase = AsyncClient(SUPABASE_URL, SUPABASE_KEY)

# Define analysis mode type
AnalysisMode = Literal['ai', 'traditional']

class ArticleRequest(BaseModel):
    url: HttpUrl
    use_ai: bool = True  # Default to AI-powered analysis

class MediaScoreDetails(BaseModel):
    headline_analysis: Dict[str, Any]
    sentiment_analysis: Dict[str, Any]
    bias_analysis: Dict[str, Any]
    evidence_analysis: Dict[str, Any]

class MediaScore(BaseModel):
    media_unmasked_score: float
    rating: str
    details: MediaScoreDetails

class AnalysisResponse(BaseModel):
    headline: str
    content: str
    sentiment: str
    bias: str
    bias_score: float
    bias_percentage: float
    media_score: MediaScore
    analysis_mode: AnalysisMode

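# Example request body accepted by the endpoint below (field names come from
# ArticleRequest above; the URL is illustrative):
#
#   {"url": "https://example.com/news/story", "use_ai": true}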
@router.post("/analyze", response_model=AnalysisResponse)
async def analyze_article(request: ArticleRequest) -> AnalysisResponse:
    """
    Analyze an article for bias, sentiment, and credibility.
    
    Args:
        request: ArticleRequest containing the URL to analyze and analysis preferences
        
    Returns:
        AnalysisResponse with complete analysis results
        
    Raises:
        HTTPException: If scraping or analysis fails
    """
    try:
        # Determine analysis mode
        analysis_mode: AnalysisMode = 'ai' if request.use_ai else 'traditional'
        logger.info(f"Analyzing article: {request.url} (Analysis Mode: {analysis_mode})")
        
        # Check cache with both URL and analysis mode
        try:
            cached_result = await supabase.table('article_analysis') \
                .select('*') \
                .eq('url', str(request.url)) \
                .eq('analysis_mode', analysis_mode) \
                .limit(1) \
                .single() \
                .execute()
            
            if cached_result and cached_result.data:
                logger.info(f"Found cached analysis for URL with {analysis_mode} mode")
                return AnalysisResponse.parse_obj(cached_result.data)
                
        except Exception as cache_error:
            # .single() raises when no matching row exists, so a cache miss also lands
            # here; fall through and run a fresh analysis.
            logger.warning(f"Cache lookup failed: {str(cache_error)}")
        
        # Scrape article
        article = scraper.scrape_article(str(request.url))
        if not article:
            raise HTTPException(
                status_code=400,
                detail="Failed to scrape article content"
            )
        
        # Initialize scorer with specified analysis preference
        scorer = MediaScorer(use_ai=request.use_ai)
        
        # Analyze content
        analysis = scorer.calculate_media_score(
            article["headline"],
            article["content"]
        )
        
        # Log raw values for debugging
        logger.info("Raw values:")
        logger.info(f"media_unmasked_score type: {type(analysis['media_unmasked_score'])}")
        logger.info(f"media_unmasked_score value: {analysis['media_unmasked_score']}")
        
        # Prepare response data
        response_dict = {
            "headline": str(article['headline']),
            "content": str(article['content']),
            "sentiment": str(analysis['details']['sentiment_analysis']['sentiment']),
            "bias": str(analysis['details']['bias_analysis']['bias']),
            "bias_score": float(analysis['details']['bias_analysis']['bias_score']),
            "bias_percentage": float(analysis['details']['bias_analysis']['bias_percentage']),
            "analysis_mode": analysis_mode,
            "media_score": {
                "media_unmasked_score": float(analysis['media_unmasked_score']),
                "rating": str(analysis['rating']),
                "details": {
                    "headline_analysis": {
                        "headline_vs_content_score": float(analysis['details']['headline_analysis']['headline_vs_content_score']),
                        "flagged_phrases": analysis['details']['headline_analysis'].get('flagged_phrases', [])
                    },
                    "sentiment_analysis": {
                        "sentiment": str(analysis['details']['sentiment_analysis']['sentiment']),
                        "manipulation_score": float(analysis['details']['sentiment_analysis']['manipulation_score']),
                        "flagged_phrases": list(analysis['details']['sentiment_analysis']['flagged_phrases'])
                    },
                    "bias_analysis": {
                        "bias": str(analysis['details']['bias_analysis']['bias']),
                        "bias_score": float(analysis['details']['bias_analysis']['bias_score']),
                        "bias_percentage": float(analysis['details']['bias_analysis']['bias_percentage']),
                        "flagged_phrases": list(analysis['details']['bias_analysis']['flagged_phrases'])
                    },
                    "evidence_analysis": {
                        "evidence_based_score": float(analysis['details']['evidence_analysis']['evidence_based_score']),
                        "flagged_phrases": list(analysis['details']['evidence_analysis']['flagged_phrases'])
                    }
                }
            }
        }
        
        # Save to Supabase with analysis mode
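        # Assumed schema (not defined in this file): an `article_analysis` table whose
        # composite unique constraint on (url, analysis_mode) backs the on_conflict
        # clause of the upsert below.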
        try:
            await supabase.table('article_analysis').upsert({
                'url': str(request.url),
                'headline': response_dict['headline'],
                'content': response_dict['content'],
                'sentiment': response_dict['sentiment'],
                'bias': response_dict['bias'],
                'bias_score': response_dict['bias_score'],
                'bias_percentage': response_dict['bias_percentage'],
                'media_score': response_dict['media_score'],
                'analysis_mode': analysis_mode,  # Store the analysis mode
                'created_at': 'now()'  # Use server timestamp
            }, on_conflict='url,analysis_mode').execute()  # Specify composite unique constraint
            
            logger.info(f"Saved analysis to database with mode: {analysis_mode}")
            
        except Exception as db_error:
            logger.error(f"Failed to save to database: {str(db_error)}")
            # Continue since we can still return the analysis even if saving fails
        
        # Return the response
        return AnalysisResponse.parse_obj(response_dict)
        
    except HTTPException:
        # Propagate HTTP errors (e.g. the 400 raised above) unchanged instead of
        # converting them into a generic 500.
        raise
    except Exception as e:
        logger.error(f"Analysis failed: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=500,
            detail=f"Analysis failed: {str(e)}"
        )
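
# Usage sketch: once the router is mounted as sketched near the top of this module,
# the endpoint can be exercised with FastAPI's test client. This assumes the "/api"
# prefix and live scraper/Supabase dependencies; it is illustrative, not part of the route.
#
#   from fastapi.testclient import TestClient
#
#   client = TestClient(app)
#   resp = client.post(
#       "/api/analyze",
#       json={"url": "https://example.com/news/story", "use_ai": False},
#   )
#   print(resp.status_code, resp.json()["media_score"]["rating"])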