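"""Count and download battle_anony conversation logs per annotator.

Conversations are attributed to annotators by matching the logged IP address
or the vote username against the whitelists below. A conversation counts as a
valid vote when all six feedback fields are filled and it contains at least
two rounds of chat. Files can optionally be downloaded into per-annotator
valid/invalid folders depending on whether a matching sandbox log exists.
"""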
import os
import logging
from datetime import datetime, timedelta
import json
from collections import defaultdict
import re
import argparse
from typing import Dict, Set, Tuple, Optional
from log_reader import RemoteLogReader
# Whitelisted IP addresses for each annotator
WHITELIST_IPS_DICT = {
"Chen Gong": ["128.143.67.19"],
"Juyong Jiang": ["175.159.122.63"],
"Kenneth Hamilton": ["109.245.193.97"],
"Marek Suppa": ["158.195.18.232"],
"Max Tian": ["2607:fea8:4f40:4b00:e5b9:9806:6b69:233b", "2607:fea8:4f40:4b00:bcef:571:6124:f01", "2607:fea8:7c9d:3800:d9c0:7295:3e2e:6287", "2607:fea8:7c9d:3800:fd51:2c91:c9e2:3c8"],
"Mengzhao Jia": ["66.254.231.49", "160.32.74.89"],
"Noah Ziems": ["2601:245:c500:92c0:633c:c0d2:dcc1:1f48", "2601:245:c500:92c0:961e:9ac7:e02:c266", "2601:245:c500:92c0:ae74:d1d5:ca3b:da90"],
"Sabina A": ["175.196.44.217", "58.235.174.122", "14.52.175.55"],
"Wenhao Yu": ["2601:600:8d00:9510:1d77:b610:9358:f443", "2601:600:8d00:9510:513f:6c4e:5960:fdc7", "174.164.6.99"],
"Vaisakhi Mishra": ["74.90.222.68"],
"Kumar Shridhar": ["129.132.145.250"],
"Viktor Gal": ["2a02:169:3e9:0:6ce8:e76f:faed:c830"],
"Guangyu Song": ["70.50.179.57", "209.226.139.83"],
"Bhupesh Bishnoi": ["37.65.177.22", "195.220.58.237", "194.57.114.147", "195.220.58.234"],
"Zheng Liu": ["128.143.71.67"],
"Ming Xu": ["2601:600:8d00:9510:185b:955d:275b:7685", "2601:600:8d00:9510:5150:468c:ab7d:518d"],
"Ayush Sunil Munot": ["10.145.76.56"],
"Saiteja Utpala": ["192.168.31.185"]
}
# Username whitelist for each annotator
WHITELIST_USERNAMES_DICT = {
"Chen Gong": ["Chen Gong"],
"Juyong Jiang": ["juyongjiang"],
"Kenneth Hamilton": [],
"Marek Suppa": [],
"Max Tian": [],
"Mengzhao Jia": ["Mengzhao Jia"],
"Noah Ziems": [],
"Sabina A": [],
"Wenhao Yu": [],
"Vaisakhi Mishra": [],
"Kumar Shridhar": [],
"Viktor Gal": [],
"Guangyu Song": [],
"Bhupesh Bishnoi": ["BB"],
"Zheng Liu": ["ZL"],
"Ming Xu": [],
"Ayush Sunil Munot": [],
"Terry Yue Zhuo": ["test"],
"Saiteja Utpala": ["saitejautpala"]
}
# Flatten IP list for backward compatibility
WHITELIST_IPS = [ip for ips in WHITELIST_IPS_DICT.values() for ip in ips]
# Flatten username list for backward compatibility
WHITELIST_USERNAMES = [username for usernames in WHITELIST_USERNAMES_DICT.values() for username in usernames]
logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(__name__)
def get_ip_from_jsonl(content: str) -> Optional[str]:
"""Extract IP from the first line of a JSONL content"""
try:
first_line = content.split('\n')[0]
data = json.loads(first_line)
return data.get('ip')
except Exception as e:
log.error(f"Error extracting IP from content: {e}")
return None
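# Example (hypothetical log content):
#   get_ip_from_jsonl('{"ip": "128.143.67.19"}\n{"type": "chat"}') -> '128.143.67.19'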
def get_username_from_jsonl(content: str) -> Optional[str]:
"""Extract username from the last line of a JSONL content if it's a vote"""
try:
lines = [line.strip() for line in content.split('\n') if line.strip()]
if not lines:
return None
last_line = json.loads(lines[-1])
if last_line.get('type') == 'vote':
return last_line.get('username')
return None
except Exception as e:
log.error(f"Error extracting username from content: {e}")
return None
def get_chat_session_id(file_name: str, content: Optional[str] = None) -> Optional[str]:
"""Extract chat_session_id based on the file location:
- For files under conv_logs: extract from filename
- For files under sandbox_logs: read from file content
"""
try:
if 'conv_logs' in file_name:
            # Extract the id from the file name (which may include its path)
            match = re.search(r'conv-log-([a-f0-9]+)\.json$', file_name)
if match:
return match.group(1)
elif 'sandbox_logs' in file_name and content:
# Read from file content for sandbox_logs
data = json.loads(content)
return data['sandbox_state'].get('chat_session_id')
return None
except Exception as e:
log.error(f"Error getting chat_session_id from {file_name}: {e}")
return None
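# Examples (hypothetical paths): for conversation logs the id comes from the
# file name, for sandbox logs it is read out of the JSON content.
#   get_chat_session_id('conv_logs/2025_02_18/conv-log-0123abcd.json') -> '0123abcd'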
def get_sandbox_session_ids(reader: 'RemoteLogReader', date_str: str) -> Set[str]:
"""Get all chat_session_ids from sandbox logs for a given date"""
session_ids = set()
try:
sandbox_logs = reader.get_sandbox_logs(date_str)
        for sandbox_log in sandbox_logs:
            if isinstance(sandbox_log, dict):
                session_id = sandbox_log.get('sandbox_state', {}).get('chat_session_id')
if session_id:
session_ids.add(session_id)
except Exception as e:
log.error(f"Error getting sandbox session IDs for date {date_str}: {e}")
return session_ids
def get_file_data(content: str) -> Tuple[Optional[str], Optional[str], bool]:
"""Read file content and return IP, username, and vote condition status"""
try:
lines = [line.strip() for line in content.split('\n') if line.strip()]
if not lines:
return None, None, False
# Get IP from first line
try:
first_line_data = json.loads(lines[0])
ip = first_line_data.get('ip')
except json.JSONDecodeError:
ip = None
# Early check if IP is in whitelist
ip_in_whitelist = ip in WHITELIST_IPS
# Find the vote line (if any)
username = None
vote_conditions_met = False
vote_line_index = -1
# Search for the vote line
for i, line in enumerate(lines):
try:
line_data = json.loads(line)
if line_data.get('type') == 'vote':
vote_line_index = i
break
except json.JSONDecodeError:
continue
# If we found a vote line, check conditions and get username
if vote_line_index >= 0:
try:
vote_line_data = json.loads(lines[vote_line_index])
# Only try to get username if the key exists
if 'username' in vote_line_data:
username = vote_line_data.get('username')
feedback = vote_line_data.get('feedback')
                # Vote conditions: all 6 feedback fields are filled and there are at
                # least 4 lines up to and including the vote line (2 rounds of chat)
                relevant_lines = lines[:vote_line_index + 1]
                vote_conditions_met = (
                    isinstance(feedback, dict) and
                    sum(1 for v in feedback.values() if v) == 6 and
                    len(relevant_lines) >= 4
                )
except (json.JSONDecodeError, TypeError):
pass
# Check if username is in whitelist (if username exists)
username_in_whitelist = username in WHITELIST_USERNAMES if username else False
# Early return if neither IP nor username is in whitelist
if not (ip_in_whitelist or username_in_whitelist):
return ip, username, False
return ip, username, vote_conditions_met
except Exception as e:
log.error(f"Error processing file content: {e}")
return None, None, False
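# Minimal sketch of content that meets the vote conditions (hypothetical data:
# at least four JSONL lines up to the vote, with six truthy feedback fields):
#   {"ip": "128.143.67.19"}
#   {"type": "chat", "role": "user"}
#   {"type": "chat", "role": "assistant"}
#   {"type": "vote", "username": "Chen Gong", "feedback": {"q1": 1, "q2": 1, "q3": 1, "q4": 1, "q5": 1, "q6": 1}}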
def get_annotator_name(ip: Optional[str], username: Optional[str]) -> Optional[str]:
"""Get annotator name from IP or username"""
# Check IP first
if ip:
for name, ips in WHITELIST_IPS_DICT.items():
if ip in ips:
return name
# Check username if IP didn't match
if username:
for name, usernames in WHITELIST_USERNAMES_DICT.items():
if username in usernames:
return name
return None
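# Examples using the whitelists above (IP is checked before username):
#   get_annotator_name('129.132.145.250', None) -> 'Kumar Shridhar'
#   get_annotator_name(None, 'juyongjiang')     -> 'Juyong Jiang'
#   get_annotator_name(None, None)              -> None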
def count_files_per_annotator(reader: 'RemoteLogReader', start_date_str: str = "2025_02_18") -> Dict[str, int]:
"""Count files per annotator name from the given start date, considering both IP and username"""
# Convert start date string to datetime
start_date = datetime.strptime(start_date_str, "%Y_%m_%d")
name_counts = defaultdict(int)
try:
# Get current date for iteration
current_date = start_date
today = datetime.now()
while current_date <= today:
date_str = current_date.strftime("%Y_%m_%d")
try:
# Get conversation logs for battle_anony mode
conv_logs = reader.get_conv_logs(date_str)
battle_anony_logs = conv_logs.get('battle_anony', {})
# Process each conversation
for conv_id, messages in battle_anony_logs.items():
if messages:
# Convert messages to file content format
content = '\n'.join(json.dumps(msg) for msg in messages)
ip, username, vote_conditions_met = get_file_data(content)
if vote_conditions_met:
# Get annotator name from either IP or username
annotator_name = get_annotator_name(ip, username)
if annotator_name:
name_counts[annotator_name] += 1
except Exception as e:
log.error(f"Error processing logs for date {date_str}: {e}")
# Move to next day
current_date += timedelta(days=1)
except Exception as e:
log.error(f"Error accessing logs: {e}")
return dict(name_counts)
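# Example usage (return values are illustrative, not real results):
#   reader = RemoteLogReader()
#   counts = count_files_per_annotator(reader, start_date_str="2025_02_18")
#   counts -> {"Chen Gong": 12, "Marek Suppa": 7, ...}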
def download_files_by_name(reader: 'RemoteLogReader', start_date_str: str = "2025_02_18", check_sandbox: bool = True) -> None:
"""Download files and organize them by annotator name
Args:
reader: RemoteLogReader instance
start_date_str: The start date in YYYY_MM_DD format
check_sandbox: Whether to check for matching sandbox logs
"""
# Create base data directory
data_dir = os.path.join(os.getcwd(), "data")
os.makedirs(data_dir, exist_ok=True)
# Convert start date string to datetime
start_date = datetime.strptime(start_date_str, "%Y_%m_%d")
try:
# Get current date for iteration
current_date = start_date
today = datetime.now()
while current_date <= today:
date_str = current_date.strftime("%Y_%m_%d")
# Get all sandbox session IDs for this date
sandbox_session_ids = get_sandbox_session_ids(reader, date_str) if check_sandbox else set()
try:
# Get conversation logs for battle_anony mode
conv_logs = reader.get_conv_logs(date_str)
battle_anony_logs = conv_logs.get('battle_anony', {})
# Process each conversation
for conv_id, messages in battle_anony_logs.items():
if not messages:
continue
# Convert messages to file content
content = '\n'.join(json.dumps(msg) for msg in messages)
ip = get_ip_from_jsonl(content)
username = get_username_from_jsonl(content)
# Get annotator name from either IP or username
annotator_name = get_annotator_name(ip, username)
if annotator_name:
# Create directory structure for this name
name_dir = os.path.join(data_dir, annotator_name)
valid_dir = os.path.join(name_dir, "valid")
invalid_dir = os.path.join(name_dir, "invalid")
os.makedirs(valid_dir, exist_ok=True)
os.makedirs(invalid_dir, exist_ok=True)
# Check if chat_session_id exists in sandbox logs
if check_sandbox:
has_sandbox = conv_id in sandbox_session_ids
target_dir = valid_dir if has_sandbox else invalid_dir
else:
# When sandbox checking is disabled, put everything in valid
target_dir = valid_dir
# Save the file
file_name = f"conv-log-{conv_id}.json"
local_file_path = os.path.join(target_dir, file_name)
try:
                        with open(local_file_path, 'w', encoding='utf-8') as f:
f.write(content)
log.info(f"Saved {file_name} to {target_dir}")
except Exception as e:
log.error(f"Error saving file {file_name}: {e}")
except Exception as e:
log.error(f"Error processing logs for date {date_str}: {e}")
# Move to next day
current_date += timedelta(days=1)
except Exception as e:
log.error(f"Error accessing logs: {e}")
def main():
    # Parse optional flags first so --help works without any remote setup
    parser = argparse.ArgumentParser(description='Download and organize conversation files by annotator name')
    parser.add_argument('--sandbox-check', action='store_true', help='Check for matching sandbox logs')
    parser.add_argument('--download', action='store_true', help='Enable file download')
    args = parser.parse_args()
    # Initialize RemoteLogReader
    reader = RemoteLogReader()
# Download files if enabled
if args.download:
print("\nDownloading files and organizing by annotator name...")
download_files_by_name(reader, check_sandbox=args.sandbox_check)
# Count and display statistics
name_counts = count_files_per_annotator(reader)
print("\nFile counts per annotator:")
for name, count in sorted(name_counts.items(), key=lambda x: x[1], reverse=True):
print(f"Name: {name:<20} Count: {count}")
if __name__ == "__main__":
main() |
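# Typical invocations (script name is illustrative):
#   python download_annotations.py                             # print per-annotator counts only
#   python download_annotations.py --download                  # also save conversations under ./data/<annotator>/
#   python download_annotations.py --download --sandbox-check  # sort saved files into valid/invalid by sandbox match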