Raiff1982 committed
Commit 35df41b · verified · 1 Parent(s): d59274e

Delete app.py

Files changed (1)
  1. app.py +0 -422
app.py DELETED
@@ -1,422 +0,0 @@
- import json
- import os
- import hashlib
- import numpy as np
- from collections import defaultdict
- from datetime import datetime, timedelta
- import filelock
- import pathlib
- import shutil
- import sqlite3
- from rapidfuzz import fuzz
- import re
- import nltk
- from nltk.tokenize import word_tokenize
- from nltk.stem import WordNetLemmatizer
- import logging
- import time
- from tenacity import retry, stop_after_attempt, wait_exponential
- from concurrent.futures import ThreadPoolExecutor
- import gradio as gr
-
- # Download required NLTK data on first use
- try:
-     nltk.data.find('tokenizers/punkt')
-     nltk.data.find('corpora/wordnet')
- except LookupError:
-     nltk.download('punkt')
-     nltk.download('wordnet')
-
- # Set up logging
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
- logger = logging.getLogger(__name__)
-
- class LockManager:
-     """Abstract locking mechanism for file or database operations."""
-     def __init__(self, lock_path):
-         self.lock = filelock.FileLock(lock_path, timeout=10)
-
-     def __enter__(self):
-         self.lock.acquire()
-         return self
-
-     def __exit__(self, exc_type, exc_val, exc_tb):
-         self.lock.release()
-
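- # NexisSignalEngine hashes, scores, and persists incoming text "signals",
- # then issues a verdict by combining agent perspectives and reasoning frames.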
- class NexisSignalEngine:
-     def __init__(self, memory_path, entropy_threshold=0.08, config_path="config.json", max_memory_entries=10000, memory_ttl_days=30, fuzzy_threshold=80, max_db_size_mb=100):
-         """
-         Initialize the NexisSignalEngine for signal processing and analysis.
-
-         Args:
-             memory_path: Path to the SQLite .db file used as persistent memory.
-             entropy_threshold: Entropy index above which a signal is treated as high risk.
-             config_path: Optional JSON file overriding the default term lists.
-             max_memory_entries: Row count that triggers memory rotation.
-             memory_ttl_days: Age in days after which records are pruned.
-             fuzzy_threshold: Minimum rapidfuzz ratio (0-100) for a term match.
-             max_db_size_mb: On-disk size that triggers memory rotation.
-         """
-         self.memory_path = self._validate_path(memory_path)
-         self.entropy_threshold = entropy_threshold
-         self.max_memory_entries = max_memory_entries
-         self.memory_ttl = timedelta(days=memory_ttl_days)
-         self.fuzzy_threshold = fuzzy_threshold
-         self.max_db_size_mb = max_db_size_mb
-         self.lemmatizer = WordNetLemmatizer()
-         self.token_cache = {}
-         self.config = self._load_config(config_path)
-         self._init_sqlite()  # create tables before loading any existing memory
-         self.memory = self._load_memory()
-         self.cache = defaultdict(list)
-         self.perspectives = ["Colleen", "Luke", "Kellyanne"]
-
-     def _validate_path(self, path):
-         path = pathlib.Path(path).resolve()
-         if path.suffix != '.db':
-             raise ValueError("Memory path must be a .db file")
-         return str(path)
-
-     def _load_config(self, config_path):
-         default_config = {
-             "ethical_terms": ["hope", "truth", "resonance", "repair"],
-             "entropic_terms": ["corruption", "instability", "malice", "chaos"],
-             "risk_terms": ["manipulate", "exploit", "bypass", "infect", "override"],
-             "virtue_terms": ["hope", "grace", "resolve"]
-         }
-         if os.path.exists(config_path):
-             try:
-                 with open(config_path, 'r') as f:
-                     config = json.load(f)
-                 default_config.update(config)
-             except json.JSONDecodeError:
-                 logger.warning(f"Invalid config file at {config_path}. Using defaults.")
-         required_keys = ["ethical_terms", "entropic_terms", "risk_terms", "virtue_terms"]
-         missing_keys = [k for k in required_keys if k not in default_config or not default_config[k]]
-         if missing_keys:
-             raise ValueError(f"Config missing required keys: {missing_keys}")
-         return default_config
-
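-     # Persistence uses two tables: `memory` holds the canonical JSON records
-     # keyed by content hash, and `memory_fts` mirrors the searchable fields
-     # for full-text queries via query_memory().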
-     def _init_sqlite(self):
-         with sqlite3.connect(self.memory_path) as conn:
-             conn.execute("""
-                 CREATE TABLE IF NOT EXISTS memory (
-                     hash TEXT PRIMARY KEY,
-                     record JSON,
-                     timestamp TEXT,
-                     integrity_hash TEXT
-                 )
-             """)
-             # `hash` is stored UNINDEXED so FTS rows can be keyed by the record
-             # hash (FTS5 rowids must be integers, not hex digest strings).
-             conn.execute("""
-                 CREATE VIRTUAL TABLE IF NOT EXISTS memory_fts
-                 USING FTS5(hash UNINDEXED, input, intent_signature, reasoning, verdict)
-             """)
-             conn.commit()
-
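-     # Records are verified against their stored SHA-256 integrity hash on load;
-     # any record whose recomputed hash differs is skipped as tampered.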
-     @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
-     def _load_memory(self):
-         memory = {}
-         try:
-             with sqlite3.connect(self.memory_path) as conn:
-                 cursor = conn.cursor()
-                 cursor.execute("SELECT hash, record, integrity_hash FROM memory")
-                 for hash_val, record_json, integrity_hash in cursor.fetchall():
-                     record = json.loads(record_json)
-                     computed_hash = hashlib.sha256(json.dumps(record, sort_keys=True).encode()).hexdigest()
-                     if computed_hash != integrity_hash:
-                         logger.warning(f"Tampered record detected for hash {hash_val}")
-                         continue
-                     memory[hash_val] = record
-         except sqlite3.Error as e:
-             logger.error(f"Error loading memory: {e}")
-         return memory
-
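-     # Saving rewrites every in-memory record under a file lock, recomputing its
-     # integrity hash and refreshing the corresponding FTS mirror row.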
-     @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
-     def _save_memory(self):
-         def default_serializer(o):
-             if isinstance(o, complex):
-                 return {"real": o.real, "imag": o.imag}
-             if isinstance(o, np.ndarray):
-                 return o.tolist()
-             if isinstance(o, np.generic):
-                 return o.item()  # convert NumPy scalars to native Python types
-             raise TypeError(f"Object of type {o.__class__.__name__} is not JSON serializable")
-
-         with LockManager(f"{self.memory_path}.lock"):
-             with sqlite3.connect(self.memory_path) as conn:
-                 cursor = conn.cursor()
-                 for hash_val, record in self.memory.items():
-                     record_json = json.dumps(record, default=default_serializer)
-                     integrity_hash = hashlib.sha256(json.dumps(record, sort_keys=True, default=default_serializer).encode()).hexdigest()
-                     intent_signature = record.get('intent_signature', {})
-                     intent_str = f"suspicion_score:{intent_signature.get('suspicion_score', 0)} entropy_index:{intent_signature.get('entropy_index', 0)}"
-                     reasoning = record.get('reasoning', {})
-                     reasoning_str = " ".join(f"{k}:{v}" for k, v in reasoning.items())
-                     cursor.execute("""
-                         INSERT OR REPLACE INTO memory (hash, record, timestamp, integrity_hash)
-                         VALUES (?, ?, ?, ?)
-                     """, (hash_val, record_json, record['timestamp'], integrity_hash))
-                     # Refresh the FTS mirror row, keyed by hash rather than rowid.
-                     cursor.execute("DELETE FROM memory_fts WHERE hash = ?", (hash_val,))
-                     cursor.execute("""
-                         INSERT INTO memory_fts (hash, input, intent_signature, reasoning, verdict)
-                         VALUES (?, ?, ?, ?, ?)
-                     """, (
-                         hash_val,
-                         record['input'],
-                         intent_str,
-                         reasoning_str,
-                         record.get('verdict', '')
-                     ))
-                 conn.commit()
-
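-     # Memory is pruned by TTL; once either the row count or the on-disk size
-     # limit is reached, the database is rotated out to a timestamped .bak file.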
-     def _prune_and_rotate_memory(self):
-         now = datetime.utcnow()
-         with LockManager(f"{self.memory_path}.lock"):
-             with sqlite3.connect(self.memory_path) as conn:
-                 cursor = conn.cursor()
-                 cursor.execute("""
-                     DELETE FROM memory
-                     WHERE timestamp < ?
-                 """, ((now - self.memory_ttl).isoformat(),))
-                 cursor.execute("DELETE FROM memory_fts WHERE hash NOT IN (SELECT hash FROM memory)")
-                 conn.commit()
-                 cursor.execute("SELECT COUNT(*) FROM memory")
-                 count = cursor.fetchone()[0]
-             # Rotate outside the connection so the archived file is left intact
-             # and the fresh database starts empty.
-             db_size_mb = os.path.getsize(self.memory_path) / (1024 * 1024)
-             if count >= self.max_memory_entries or db_size_mb >= self.max_db_size_mb:
-                 self._rotate_memory_file()
-                 self.memory = {}
-
-     def _rotate_memory_file(self):
-         archive_path = f"{self.memory_path}.{datetime.utcnow().strftime('%Y%m%d%H%M%S')}.bak"
-         if os.path.exists(self.memory_path):
-             shutil.move(self.memory_path, archive_path)
-         self._init_sqlite()
-
-     def _hash(self, signal):
-         return hashlib.sha256(signal.encode()).hexdigest()
-
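-     # The vector helpers below give each signal a small deterministic complex
-     # "fingerprint": two seeded Gaussian components rotated by a fixed 45° angle.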
-     def _rotate_vector(self, signal):
-         # Seed a reproducible generator from the signal hash so the same input
-         # always yields the same vector. (secrets.SystemRandom cannot be seeded;
-         # its seed() is a no-op stub.)
-         seed = int(self._hash(signal)[:8], 16) % (2**32)
-         rng = np.random.default_rng(seed)
-         vec = rng.normal(0, 1, 2) + 1j * rng.normal(0, 1, 2)
-         theta = np.pi / 4
-         rot = np.array([[np.cos(theta), -np.sin(theta)],
-                         [np.sin(theta), np.cos(theta)]])
-         rotated = np.dot(rot, vec)
-         return rotated, [{"real": v.real, "imag": v.imag} for v in vec]
-
-     def _entanglement_tensor(self, signal_vec):
-         matrix = np.array([[1, 0.5], [0.5, 1]])
-         return np.dot(matrix, signal_vec)
-
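-     # Treats the first 1000 alphabetic characters as pseudo-frequencies
-     # (ord mod 13) and keeps the first three components of the normalized
-     # real FFT spectrum as a harmonic profile.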
-     def _resonance_equation(self, signal):
-         freqs = [ord(c) % 13 for c in signal[:1000] if c.isalpha()]
-         if not freqs:
-             return [0.0, 0.0, 0.0]
-         spectrum = np.fft.fft(freqs)
-         norm = np.linalg.norm(spectrum.real)
-         normalized = spectrum.real / (norm if norm != 0 else 1)
-         return normalized[:3].tolist()
-
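-     # Tokens are lemmatized and augmented with 2-3 character n-grams so that
-     # obfuscated spellings (e.g. "tru/th", "cha0s") can still fuzzy-match terms.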
-     def _tokenize_and_lemmatize(self, signal_lower):
-         if signal_lower in self.token_cache:
-             return self.token_cache[signal_lower]
-         try:
-             tokens = word_tokenize(signal_lower)
-         except LookupError:
-             tokens = signal_lower.split()  # simple split as a fallback if punkt is unavailable
-         lemmatized = [self.lemmatizer.lemmatize(token) for token in tokens]
-         ngrams = []
-         for n in range(2, 4):
-             for i in range(len(signal_lower) - n + 1):
-                 ngram = signal_lower[i:i+n]
-                 ngrams.append(self.lemmatizer.lemmatize(re.sub(r'[^a-z]', '', ngram)))
-         result = lemmatized + [ng for ng in ngrams if ng]
-         self.token_cache[signal_lower] = result
-         return result
-
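-     # The scoring helpers below fuzzy-match lemmatized config terms against the
-     # token list, counting a hit when rapidfuzz's ratio meets fuzzy_threshold.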
-     def _entropy(self, signal_lower, tokens):
-         unique = set(tokens)
-         term_count = 0
-         for term in self.config["entropic_terms"]:
-             lemmatized_term = self.lemmatizer.lemmatize(term)
-             for token in tokens:
-                 if fuzz.ratio(lemmatized_term, token) >= self.fuzzy_threshold:
-                     term_count += 1
-         return term_count / max(len(unique), 1)
-
-     def _tag_ethics(self, signal_lower, tokens):
-         for term in self.config["ethical_terms"]:
-             lemmatized_term = self.lemmatizer.lemmatize(term)
-             for token in tokens:
-                 if fuzz.ratio(lemmatized_term, token) >= self.fuzzy_threshold:
-                     return "aligned"
-         return "unaligned"
-
-     def _predict_intent_vector(self, signal_lower, tokens):
-         suspicion_score = 0
-         for term in self.config["risk_terms"]:
-             lemmatized_term = self.lemmatizer.lemmatize(term)
-             for token in tokens:
-                 if fuzz.ratio(lemmatized_term, token) >= self.fuzzy_threshold:
-                     suspicion_score += 1
-         entropy_index = round(self._entropy(signal_lower, tokens), 3)
-         ethical_alignment = self._tag_ethics(signal_lower, tokens)
-         harmonic_profile = self._resonance_equation(signal_lower)
-         volatility = round(np.std(harmonic_profile), 3)
-         risk = "high" if (suspicion_score > 1 or volatility > 2.0 or entropy_index > self.entropy_threshold) else "low"
-         return {
-             "suspicion_score": suspicion_score,
-             "entropy_index": entropy_index,
-             "ethical_alignment": ethical_alignment,
-             "harmonic_volatility": volatility,
-             "pre_corruption_risk": risk
-         }
-
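-     # Four reasoning frames each cast one vote; at least two must pass
-     # ("positive", "valid", "aligned", or "stable") for an approved verdict.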
-     def _universal_reasoning(self, signal, tokens):
-         frames = ["utilitarian", "deontological", "virtue", "systems"]
-         results, score = {}, 0
-         for frame in frames:
-             if frame == "utilitarian":
-                 repair_count = sum(1 for token in tokens if fuzz.ratio(self.lemmatizer.lemmatize("repair"), token) >= self.fuzzy_threshold)
-                 corruption_count = sum(1 for token in tokens if fuzz.ratio(self.lemmatizer.lemmatize("corruption"), token) >= self.fuzzy_threshold)
-                 val = repair_count - corruption_count
-                 result = "positive" if val >= 0 else "negative"
-             elif frame == "deontological":
-                 truth_present = any(fuzz.ratio(self.lemmatizer.lemmatize("truth"), token) >= self.fuzzy_threshold for token in tokens)
-                 chaos_present = any(fuzz.ratio(self.lemmatizer.lemmatize("chaos"), token) >= self.fuzzy_threshold for token in tokens)
-                 result = "valid" if truth_present and not chaos_present else "violated"
-             elif frame == "virtue":
-                 ok = any(any(fuzz.ratio(self.lemmatizer.lemmatize(t), token) >= self.fuzzy_threshold for token in tokens) for t in self.config["virtue_terms"])
-                 result = "aligned" if ok else "misaligned"
-             elif frame == "systems":
-                 result = "stable" if "::" in signal else "fragmented"
-             results[frame] = result
-             if result in ["positive", "valid", "aligned", "stable"]:
-                 score += 1
-         verdict = "approved" if score >= 2 else "blocked"
-         return results, verdict
-
-     def _perspective_colleen(self, signal):
-         vec, vec_serialized = self._rotate_vector(signal)
-         return {"agent": "Colleen", "vector": vec_serialized}
-
-     def _perspective_luke(self, signal_lower, tokens):
-         ethics = self._tag_ethics(signal_lower, tokens)
-         entropy_level = self._entropy(signal_lower, tokens)
-         state = "stabilized" if entropy_level < self.entropy_threshold else "diffused"
-         return {"agent": "Luke", "ethics": ethics, "entropy": entropy_level, "state": state}
-
-     def _perspective_kellyanne(self, signal_lower):
-         harmonics = self._resonance_equation(signal_lower)
-         return {"agent": "Kellyanne", "harmonics": harmonics}
-
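-     # Main entry point: a high pre-corruption risk short-circuits to an
-     # "adaptive intervention" record; otherwise all three perspectives run and
-     # the combined signal is reasoned over, recorded, and persisted.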
-     def process(self, input_signal):
-         start_time = time.perf_counter()
-         signal_lower = input_signal.lower()
-         tokens = self._tokenize_and_lemmatize(signal_lower)
-         key = self._hash(input_signal)
-         intent_vector = self._predict_intent_vector(signal_lower, tokens)
-
-         if intent_vector["pre_corruption_risk"] == "high":
-             final_record = {
-                 "hash": key,
-                 "timestamp": datetime.utcnow().isoformat(),
-                 "input": input_signal,
-                 "intent_warning": intent_vector,
-                 "verdict": "adaptive intervention",
-                 "message": "Signal flagged for pre-corruption adaptation. Reframing required."
-             }
-             self.cache[key].append(final_record)
-             self.memory[key] = final_record
-             self._save_memory()
-             logger.info(f"Processed {input_signal} (high risk) in {time.perf_counter() - start_time:.4f}s")
-             return final_record
-
-         perspectives_output = {
-             "Colleen": self._perspective_colleen(input_signal),
-             "Luke": self._perspective_luke(signal_lower, tokens),
-             "Kellyanne": self._perspective_kellyanne(signal_lower)
-         }
-
-         spider_signal = "::".join([str(perspectives_output[p]) for p in self.perspectives])
-         vec, _ = self._rotate_vector(spider_signal)
-         entangled = self._entanglement_tensor(vec)
-         entangled_serialized = [{"real": v.real, "imag": v.imag} for v in entangled]
-         reasoning, verdict = self._universal_reasoning(spider_signal, tokens)
-
-         final_record = {
-             "hash": key,
-             "timestamp": datetime.utcnow().isoformat(),
-             "input": input_signal,
-             "intent_signature": intent_vector,
-             "perspectives": perspectives_output,
-             "entangled": entangled_serialized,
-             "reasoning": reasoning,
-             "verdict": verdict
-         }
-
-         self.cache[key].append(final_record)
-         self.memory[key] = final_record
-         self._save_memory()
-         logger.info(f"Processed {input_signal} in {time.perf_counter() - start_time:.4f}s")
-         return final_record
-
-     def process_batch(self, signals):
-         with ThreadPoolExecutor(max_workers=4) as executor:
-             return list(executor.map(self.process, signals))
-
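-     # Full-text search over the FTS mirror; query_string uses FTS5 MATCH
-     # syntax, e.g. engine.query_memory('truth OR chaos').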
-     def query_memory(self, query_string):
-         with sqlite3.connect(self.memory_path) as conn:
-             cursor = conn.cursor()
-             cursor.execute("SELECT * FROM memory_fts WHERE memory_fts MATCH ?", (query_string,))
-             return [dict(zip([d[0] for d in cursor.description], row)) for row in cursor.fetchall()]
-
-     def update_config(self, new_config):
-         for key, value in new_config.items():
-             if key in {"entropy_threshold", "fuzzy_threshold"} and isinstance(value, (int, float)):
-                 setattr(self, key, value)
-             elif key in self.config and isinstance(value, list):
-                 self.config[key] = value
-         logger.info(f"Updated config with {new_config}")
-
- # Initialize the engine for the demo
- engine = NexisSignalEngine(memory_path="signals.db", max_memory_entries=100, memory_ttl_days=1, max_db_size_mb=10)
-
- # Gradio interface function
- def analyze_signal(input_text):
-     try:
-         result = engine.process(input_text)
-         return json.dumps(result, indent=2)
-     except Exception as e:
-         return f"Error: {str(e)}"
-
- # Create Gradio interface
- interface = gr.Interface(
-     fn=analyze_signal,
-     inputs=gr.Textbox(lines=2, placeholder="Enter a signal (e.g., 'tru/th hopee cha0s')"),
-     outputs=gr.Textbox(lines=10, label="Analysis Result"),
-     title="Nexis Signal Engine Demo",
-     description="Analyze signals with the Nexis Signal Engine, featuring adversarial resilience and agent-based reasoning. Try obfuscated inputs like 'tru/th' or 'cha0s'!"
- )
-
- # Launch the interface
- interface.launch()