Spaces:
Runtime error
Create app.py
app.py CHANGED
@@ -1,4 +1,4 @@
-#
+# Import necessary libraries
 import gradio as gr
 import spaces
 import torch
@@ -278,6 +278,7 @@ class TetherProAnalyzer:
         for msg_data in messages_data:
             analysis = MessageAnalysis(
                 timestamp=msg_data.get('timestamp', datetime.now().isoformat()),
+                message_id=msg_data.get('id', f"msg_{len(self.conversation_history
                 message_id=msg_data.get('id', f"msg_{len(self.conversation_history)}"),
                 text=msg_data.get('text', ''),
                 sender=msg_data.get('sender', 'unknown'),
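For context, the `MessageAnalysis(...)` call in this hunk builds one record per uploaded message dict, using `dict.get` fallbacks for missing keys. A minimal sketch of that pattern, with the dataclass fields assumed only from the keyword arguments visible in the diff:

```python
from dataclasses import dataclass
from datetime import datetime

@dataclass
class MessageAnalysis:
    # Fields assumed from the keyword arguments visible in the hunk above.
    timestamp: str
    message_id: str
    text: str
    sender: str

def build_analysis(msg_data: dict, history: list) -> MessageAnalysis:
    # dict.get() supplies a default whenever the uploaded JSON omits a key.
    return MessageAnalysis(
        timestamp=msg_data.get('timestamp', datetime.now().isoformat()),
        message_id=msg_data.get('id', f"msg_{len(history)}"),
        text=msg_data.get('text', ''),
        sender=msg_data.get('sender', 'unknown'),
    )

print(build_analysis({'text': 'hello'}, history=[]))
```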
@@ -370,7 +371,7 @@ class TetherProAnalyzer:
         df = df.sort_values('timestamp')
         return df
 
-    def
+    def _detect_escalation_trends(self, df: pd.DataFrame) -> Dict:
         """Detect escalating abuse patterns over time"""
         if len(df) < 5:
             return {'detected': False, 'reason': 'insufficient_data'}
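The restored `_detect_escalation_trends` signature and its guard clause are all this hunk shows; the trend logic itself sits outside the diff. A hedged sketch of one way such a check could look (the early-vs-late comparison and the `abuse_score` column are assumptions, not the Space's actual implementation):

```python
from typing import Dict
import pandas as pd

def detect_escalation_trend(df: pd.DataFrame, score_col: str = 'abuse_score') -> Dict:
    """Illustrative only: flag escalation when the later half of the history scores higher."""
    if len(df) < 5:
        return {'detected': False, 'reason': 'insufficient_data'}
    df = df.sort_values('timestamp')
    half = len(df) // 2
    early = df[score_col].iloc[:half].mean()
    late = df[score_col].iloc[half:].mean()
    return {
        'detected': bool(late > early * 1.25),  # arbitrary illustrative threshold
        'early_avg': round(float(early), 3),
        'late_avg': round(float(late), 3),
    }
```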
@@ -446,7 +447,7 @@ class TetherProAnalyzer:
 
         return {
             'detected': True,
-            'cycle_count': min(len(peaks
+            'cycle_count': min(len(peaks), len(valleys)),
             'avg_cycle_length_days': round(avg_cycle_length, 1),
             'pattern_type': 'tension_escalation_reconciliation',
             'confidence': min(len(peaks) / 3.0, 1.0),
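`peaks` and `valleys` are computed earlier in the method, outside this hunk; `min(len(peaks), len(valleys))` caps the reported count at the number of complete tension/reconciliation swings. A small sketch of how such peaks and valleys might be derived (the use of `scipy.signal.find_peaks` and the thresholds are assumptions for illustration):

```python
import numpy as np
from scipy.signal import find_peaks

# Illustrative daily abuse-score series (invented data).
scores = np.array([0.1, 0.4, 0.9, 0.3, 0.2, 0.8, 0.95, 0.4, 0.1, 0.7, 0.2])

peaks, _ = find_peaks(scores, height=0.6)      # escalation spikes
valleys, _ = find_peaks(-scores, height=-0.3)  # calm / reconciliation dips

cycle_count = min(len(peaks), len(valleys))    # a full cycle needs both a peak and a valley
confidence = min(len(peaks) / 3.0, 1.0)
print(cycle_count, confidence)
```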
@@ -574,6 +575,8 @@ class TetherProAnalyzer:
 
         # Day of week patterns
         if len(high_abuse) > 0:
+            day_counts = high_abuse['day_of_week'].value_counts()
+            weekend_abuse = len(high_abuse[
             day_counts = high_abuse['day_of_week'].value_counts()
             weekend_abuse = len(high_abuse[high_abuse['is_weekend']]) / len(high_abuse)
 
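This hunk assumes `day_of_week` and `is_weekend` columns added to the DataFrame elsewhere in the file. A short pandas sketch showing how those columns and the weekend ratio can be computed (column names taken from the diff, sample data invented for illustration):

```python
import pandas as pd

df = pd.DataFrame({
    'timestamp': pd.to_datetime(['2024-01-05 21:00', '2024-01-06 22:30', '2024-01-07 09:15']),
    'abuse_score': [0.8, 0.9, 0.2],
})
df['day_of_week'] = df['timestamp'].dt.day_name()
df['is_weekend'] = df['timestamp'].dt.dayofweek >= 5  # Saturday=5, Sunday=6

high_abuse = df[df['abuse_score'] > 0.7]
if len(high_abuse) > 0:
    day_counts = high_abuse['day_of_week'].value_counts()
    weekend_abuse = len(high_abuse[high_abuse['is_weekend']]) / len(high_abuse)
    print(day_counts, weekend_abuse)
```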
@@ -675,7 +678,7 @@ class TetherProAnalyzer:
         timestamps = []
         for msg in self.conversation_history:
             try:
-                timestamps.append(datetime.fromisoformat(msg.timestamp.replace('Z', '+00:00'))
+                timestamps.append(datetime.fromisoformat(msg.timestamp.replace('Z', '+00:00')))
             except:
                 continue
 
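The `.replace('Z', '+00:00')` step is there because `datetime.fromisoformat` only accepts a trailing `Z` from Python 3.11 onward; the corrected line simply closes the call. A self-contained example of the same parsing pattern:

```python
from datetime import datetime

raw = ['2024-03-01T20:15:00Z', '2024-03-02T08:30:00+00:00', 'not a date']

timestamps = []
for ts in raw:
    try:
        # 'Z' -> '+00:00' keeps fromisoformat() happy on Python < 3.11.
        timestamps.append(datetime.fromisoformat(ts.replace('Z', '+00:00')))
    except ValueError:
        continue  # skip unparseable entries, as the bare except in the Space does

print(timestamps)
```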
@@ -831,8 +834,8 @@ def analyze_temporal_patterns(messages_json: str):
 **Basic Statistics:**
 {json.dumps(results.get('basic_stats', {}), indent=2)}
 
-**Recommendations
-
+**Recommendations:"""
+        + '\n'.join([f"• {rec}" for rec in results.get('recommendations', [])])
 
         return summary, None, "Upload more conversation history for comprehensive temporal analysis."
 
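The fix closes the triple-quoted summary at `**Recommendations:` and then concatenates a bulleted list of recommendations. A standalone sketch of that string-building step (the `results` contents are invented for illustration, and the sketch appends with an explicit `+=` rather than the continuation line shown in the diff):

```python
import json

results = {
    'basic_stats': {'total_messages': 42},
    'recommendations': ['Document incidents with timestamps', 'Review weekend escalation windows'],
}

summary = f"""**Basic Statistics:**
{json.dumps(results.get('basic_stats', {}), indent=2)}

**Recommendations:"""
summary += '\n' + '\n'.join([f"• {rec}" for rec in results.get('recommendations', [])])
print(summary)
```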
@@ -889,6 +892,7 @@ def analyze_temporal_patterns(messages_json: str):
         combinations = results['temporal_analysis']['pattern_combinations']
         if combinations:
             for combo in combinations:
+                severity_emoji = "🚨" if combo['severity'] == 'critical
                 severity_emoji = "🚨" if combo['severity'] == 'critical' else "⚠️"
                 report += f"""
 **{severity_emoji} {combo['name']}**
@@ -994,8 +998,6 @@ def create_sample_data():
 
     return json.dumps(sample_data, indent=2)
 
-
-
 def create_tether_pro_interface():
     css = """
     .gradio-container {
@@ -1045,7 +1047,8 @@ def create_tether_pro_interface():
         outputs=input_json
     )
 
-    demo
+    return demo
 
 if __name__ == "__main__":
-    create_tether_pro_interface()
+    demo = create_tether_pro_interface()
+    demo.launch()
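The final hunk has the builder return the `gr.Blocks` object and launches it under the `__main__` guard, which is the usual Gradio pattern. A minimal self-contained sketch of the same structure (the components are placeholders, not the Space's actual UI):

```python
import gradio as gr

def create_interface() -> gr.Blocks:
    with gr.Blocks() as demo:
        name = gr.Textbox(label="Name")
        greeting = gr.Textbox(label="Greeting")
        gr.Button("Greet").click(lambda n: f"Hello, {n}!", inputs=name, outputs=greeting)
    return demo

if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
```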