SamanthaStorm committed on
Commit
0933842
·
verified ·
1 Parent(s): 6a26e95

Create utils.py

Browse files
Files changed (1) hide show
  1. utils.py +94 -0
utils.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # utils.py
2
+ # (formerly the top of app.py up through just before the dataclass definitions)
3
+
4
+ # Import necessary libraries
5
+ import gradio as gr
6
+ import spaces
7
+ import torch
8
+ import numpy as np
9
+ import pandas as pd
10
+ from datetime import datetime, timedelta
11
+ from collections import defaultdict, Counter
12
+ import json
13
+ from typing import List, Dict, Tuple, Optional
14
+ from dataclasses import dataclass, asdict
15
+ from enum import Enum
16
+ import matplotlib.pyplot as plt
17
+ import io
18
+ from PIL import Image
19
+ import logging
20
+ import re
21
+
22
# Set up logging
# Module-level logger named after this module so records are attributable
# when utils.py is imported from app.py.
# NOTE(review): basicConfig at import time configures the process-wide root
# logger — confirm that is intended for a shared utils module.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Device configuration
# Prefer CUDA when available; model inference elsewhere in the app
# presumably runs on this device — confirm models are moved with .to(device).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info(f"Using device: {device}")
29
+
30
# =============================================================================
# SINGLE-MESSAGE ANALYSIS UTILITIES
# =============================================================================

# Label vocabulary for the multi-label abuse-pattern classifier.
# NOTE(review): order presumably matches the model's output index order —
# confirm against the model config before reordering or editing.
LABELS = [
    "recovery phase", "control", "gaslighting", "guilt tripping", "dismissiveness",
    "blame shifting", "nonabusive", "projection", "insults",
    "contradictory statements", "obscure language",
    "veiled threats", "stalking language", "false concern",
    "false equivalence", "future faking"
]

# Binary labels for the supportive/undermining sentiment classifier.
SENTIMENT_LABELS = ["supportive", "undermining"]

# Per-label decision thresholds: a pattern counts as detected when the model
# score for that label exceeds its threshold. One entry per LABELS item.
# Values look hand-tuned (e.g. from a validation sweep) — TODO confirm
# provenance before adjusting.
THRESHOLDS = {
    "recovery phase": 0.278, "control": 0.287, "gaslighting": 0.144,
    "guilt tripping": 0.220, "dismissiveness": 0.142, "blame shifting": 0.183,
    "projection": 0.253, "insults": 0.247, "contradictory statements": 0.200,
    "obscure language": 0.455, "nonabusive": 0.281, "veiled threats": 0.310,
    "stalking language": 0.339, "false concern": 0.334, "false equivalence": 0.317,
    "future faking": 0.290
}
52
+
53
def _default_analysis_result() -> Dict:
    """Return the neutral analysis dict: stub result and error fallback.

    Keys match the contract documented on analyze_single_message_complete.
    """
    return {
        'abuse_score': 0.0,
        'darvo_score': 0.0,
        'boundary_health': 'unknown',
        'detected_patterns': [],
        'emotional_tone': 'neutral',
        'risk_level': 'low',
        'sentiment': 'supportive'
    }

def analyze_single_message_complete(text: str) -> Dict:
    """Run all detectors on a single message and return full analysis.

    Args:
        text: The raw message text to analyze.

    Returns:
        A dict with keys 'abuse_score', 'darvo_score', 'boundary_health',
        'detected_patterns', 'emotional_tone', 'risk_level', 'sentiment'.
        On any internal error, the neutral default dict is returned so
        callers never see an exception or a None.
    """
    try:
        # (Original code for running your abuse model, DARVO model,
        # boundary health, pattern detection, sentiment, tone, etc.
        # unchanged.)
        # …
        # Placeholder until the model pipeline is restored here: return the
        # neutral default so callers always receive the documented dict
        # shape (the original stub fell through and returned None, which
        # broke callers that index the result).
        return _default_analysis_result()
    except Exception as e:
        logger.error(f"Error in analyze_single_message_complete: {e}")
        return _default_analysis_result()
75
+
76
def determine_boundary_health(prediction) -> str:
    """Map raw boundary model output to 'healthy' or 'unhealthy'.

    Args:
        prediction: Raw output of the boundary-health model — exact type
            (logits tensor, label string, …) is not visible here; TODO
            confirm against the original app.py implementation.

    Returns:
        'healthy' or 'unhealthy' per the docstring contract.
        NOTE(review): this stub currently returns None — restore the
        original logic before wiring it into callers.
    """
    # (Insert your original logic.)
    pass
80
+
81
def categorize_emotional_tone(emotions, patterns, abuse_score, sentiment) -> str:
    """Combine emotion vectors, detected patterns, scores to pick a tone label.

    Args:
        emotions: Emotion model output — shape/type not visible here; TODO
            confirm against the original app.py implementation.
        patterns: Detected abuse patterns (presumably a subset of LABELS).
        abuse_score: Overall abuse score for the message.
        sentiment: Presumably one of SENTIMENT_LABELS — verify at the caller.

    Returns:
        A tone label string.
        NOTE(review): this stub currently returns None — restore the
        original logic before wiring it into callers.
    """
    # (Insert your original logic.)
    pass
85
+
86
def categorize_risk_level(abuse_score: float) -> str:
    """Turn an abuse_score into 'low', 'moderate', or 'high'.

    Args:
        abuse_score: Overall abuse score; cutoffs between the three buckets
            are not visible here — TODO restore from the original app.py.

    Returns:
        One of 'low', 'moderate', or 'high'.
        NOTE(review): this stub currently returns None — restore the
        original logic before wiring it into callers.
    """
    # (Insert your original logic.)
    pass
90
+
91
def categorize_risk_trend(scores: List[float]) -> str:
    """Given a time series of scores, classify the trend.

    Args:
        scores: Chronologically ordered abuse/risk scores; the trend
            categories (e.g. rising/falling/stable) are not visible here —
            TODO restore from the original app.py.

    Returns:
        A trend label string.
        NOTE(review): this stub currently returns None — restore the
        original logic before wiring it into callers.
    """
    # (Insert your original logic.)
    pass