Syncbuz120 committed
Commit 494bf87 · 1 Parent(s): 72ca5f8
Files changed (1):
    model/generate.py +237 -390
model/generate.py CHANGED
@@ -5,411 +5,258 @@ import logging
  import psutil
  import re
  import gc
- from typing import List, Dict, Any, Optional, Tuple
- from dataclasses import dataclass

- # Configure logging
- logging.basicConfig(level=logging.INFO)
  logger = logging.getLogger(__name__)

- # Constants
  MEMORY_OPTIMIZED_MODELS = [
-     "distilgpt2",                # ~250MB - default first choice
-     "microsoft/DialoGPT-small",  # ~250MB - good for conversational context
-     "gpt2",                      # ~500MB - fallback if more memory available
  ]

- MIN_MEMORY_FOR_MODEL = 500  # Minimum MB required to attempt model loading
- MAX_TEST_CASES = 15         # Maximum number of test cases to return
-
- @dataclass
- class TestCase:
-     id: str
-     title: str
-     description: str
-     preconditions: List[str]
-     steps: List[str]
-     expected: str
-     postconditions: List[str]
-     test_data: str
-     priority: str = "Medium"
-     category: str = "Functional"
-
- # Enhanced pattern matching with more categories
- REQUIREMENT_PATTERNS = {
-     'authentication': {
-         'keywords': ['login', 'authentication', 'signin', 'sign in', 'password', 'username', 'credential', 'auth'],
-         'priority': 'High',
-         'category': 'Security',
-         'generator': 'generate_security_tests'
-     },
-     'authorization': {
-         'keywords': ['permission', 'role', 'access', 'privilege', 'authorize', 'admin', 'user level'],
-         'priority': 'High',
-         'category': 'Security',
-         'generator': 'generate_security_tests'
-     },
-     'data_validation': {
-         'keywords': ['validate', 'validation', 'input', 'format', 'check', 'verify', 'constraint'],
-         'priority': 'High',
-         'category': 'Functional',
-         'generator': 'generate_validation_tests'
-     },
-     'database': {
-         'keywords': ['database', 'db', 'store', 'save', 'persist', 'record', 'data storage', 'crud'],
-         'priority': 'Medium',
-         'category': 'Data',
-         'generator': 'generate_data_tests'
-     },
-     'performance': {
-         'keywords': ['performance', 'speed', 'time', 'response', 'load', 'concurrent', 'scalability'],
-         'priority': 'Medium',
-         'category': 'Performance',
-         'generator': 'generate_performance_tests'
-     },
-     'api': {
-         'keywords': ['api', 'endpoint', 'service', 'request', 'response', 'rest', 'http'],
-         'priority': 'High',
-         'category': 'Integration',
-         'generator': 'generate_api_tests'
-     },
-     'error_handling': {
-         'keywords': ['error', 'exception', 'failure', 'invalid', 'incorrect', 'wrong'],
-         'priority': 'High',
-         'category': 'Reliability',
-         'generator': 'generate_error_tests'
-     },
-     'security': {
-         'keywords': ['security', 'encrypt', 'secure', 'ssl', 'https', 'token', 'session'],
-         'priority': 'High',
-         'category': 'Security',
-         'generator': 'generate_security_tests'
-     }
- }
-
- class TestCaseGenerator:
-     """Main class for generating test cases with AI and template fallback"""
-
-     def __init__(self):
-         self.model_name = None
-         self.tokenizer = None
-         self.model = None
-         self._initialize_model()
-
-     def _initialize_model(self):
-         """Initialize the optimal model based on available memory"""
-         available_mem = psutil.virtual_memory().available / (1024 * 1024)
-         logger.info(f"Available memory: {available_mem:.1f}MB")
-
-         if available_mem < MIN_MEMORY_FOR_MODEL:
-             logger.warning("Insufficient memory for model loading, using template fallback")
-             return
-
-         # Try models in order of preference
-         for model_name in MEMORY_OPTIMIZED_MODELS:
-             try:
-                 self.tokenizer, self.model = self._load_model_safely(model_name)
-                 if self.model:
-                     self.model_name = model_name
-                     logger.info(f"Successfully loaded model: {model_name}")
-                     break
-             except Exception as e:
-                 logger.warning(f"Failed to load {model_name}: {str(e)}")
-                 continue
-
-     def _load_model_safely(self, model_name: str) -> Tuple[Optional[AutoTokenizer], Optional[AutoModelForCausalLM]]:
-         """Safely load model with memory optimizations"""
-         try:
-             logger.info(f"Attempting to load {model_name}")
-
-             # Load tokenizer first
-             tokenizer = AutoTokenizer.from_pretrained(
-                 model_name,
-                 padding_side='left',
-                 use_fast=True
-             )
-
-             # Ensure pad token is set
-             if tokenizer.pad_token is None:
-                 tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token else '[PAD]'
-
-             # Load model with optimized settings
-             model = AutoModelForCausalLM.from_pretrained(
-                 model_name,
-                 torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
-                 low_cpu_mem_usage=True,
-                 device_map="auto" if torch.cuda.is_available() else None
-             )
-
-             # Explicitly move to CPU if needed
-             if not torch.cuda.is_available():
-                 model = model.to('cpu')
-
-             model.eval()
-             return tokenizer, model
-
-         except Exception as e:
-             logger.error(f"Error loading model {model_name}: {str(e)}")
-             # Clean up if partial load occurred
-             if 'tokenizer' in locals():
-                 del tokenizer
-             if 'model' in locals() and model:
-                 del model
-             gc.collect()
-             if torch.cuda.is_available():
-                 torch.cuda.empty_cache()
-             return None, None
-
-     def generate_test_cases(self, srs_text: str) -> List[TestCase]:
-         """Generate test cases using best available method"""
-         # First try AI generation if model is available
-         if self.model and self.tokenizer:
-             try:
-                 ai_cases = self._generate_with_ai(srs_text)
-                 if ai_cases:
-                     logger.info("Successfully generated test cases with AI")
-                     return ai_cases[:MAX_TEST_CASES]
-             except Exception as e:
-                 logger.warning(f"AI generation failed: {str(e)}, falling back to templates")
-
-         # Fall back to template-based generation
-         return self._generate_with_templates(srs_text)[:MAX_TEST_CASES]
-
-     def _generate_with_ai(self, srs_text: str) -> List[TestCase]:
-         """Generate test cases using AI model"""
-         max_input_length = 500  # Increased from 300 for better context
-         prompt = f"""Generate comprehensive test cases for these software requirements:
- {self._truncate_text(srs_text, max_input_length)}
-
- Provide test cases in this format:
- 1. [Test Case Title]
-    - Description: [description]
-    - Steps: [step1; step2; step3]
-    - Expected: [expected result]
-
- 2. [Next Test Case Title]..."""
-
-         try:
-             inputs = self.tokenizer(
-                 prompt,
-                 return_tensors="pt",
-                 max_length=512,
-                 truncation=True,
-                 padding=True,
-                 return_attention_mask=True
-             )
-
-             # Generate with more controlled parameters
-             with torch.no_grad():
-                 outputs = self.model.generate(
-                     input_ids=inputs['input_ids'],
-                     attention_mask=inputs['attention_mask'],
-                     max_new_tokens=300,
-                     num_return_sequences=1,
-                     temperature=0.7,
-                     top_p=0.9,
-                     do_sample=True,
-                     pad_token_id=self.tokenizer.pad_token_id,
-                     eos_token_id=self.tokenizer.eos_token_id
-                 )
-
-             generated = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
-             return self._parse_ai_output(generated)
-
-         except Exception as e:
-             logger.error(f"AI generation error: {str(e)}")
-             raise
-         finally:
-             # Clean up
-             if 'inputs' in locals():
-                 del inputs
-             if 'outputs' in locals():
-                 del outputs
-             gc.collect()
-             if torch.cuda.is_available():
-                 torch.cuda.empty_cache()
-
-     def _parse_ai_output(self, text: str) -> List[TestCase]:
-         """Parse AI-generated text into structured test cases"""
-         cases = []
-         current_case = None
-
-         for line in text.split('\n'):
-             line = line.strip()
-             if line.startswith(('1.', '2.', '3.', '4.', '5.', '6.', '7.', '8.', '9.')):
-                 if current_case:
-                     cases.append(current_case)
-                 title = line[2:].strip()
-                 current_case = TestCase(
-                     id=f"TC_AI_{len(cases)+1:03d}",
-                     title=title,
-                     description="",
-                     preconditions=["System is accessible"],
-                     steps=[],
-                     expected="",
-                     postconditions=["Test executed"],
-                     test_data="As specified in requirements",
-                     priority="Medium",
-                     category="Functional"
-                 )
-             elif line.lower().startswith('description:') and current_case:
-                 current_case.description = line[12:].strip()
-             elif line.lower().startswith('steps:') and current_case:
-                 steps = line[6:].strip().split(';')
-                 current_case.steps = [s.strip() for s in steps if s.strip()]
-             elif line.lower().startswith('expected:') and current_case:
-                 current_case.expected = line[9:].strip()
-
-         if current_case:
-             cases.append(current_case)
-
-         return cases or [self._create_fallback_case()]
-
-     def _generate_with_templates(self, srs_text: str) -> List[TestCase]:
-         """Generate test cases using pattern matching and templates"""
-         patterns = self._analyze_requirements(srs_text)
-         test_cases = []
-
-         for pattern_name, pattern_data in patterns.items():
-             generator_name = REQUIREMENT_PATTERNS[pattern_name]['generator']
-             generator = getattr(self, generator_name, self._generate_generic_tests)
-             cases = generator(pattern_data['matches'])
-
-             for i, case in enumerate(cases):
-                 case.id = f"TC_{pattern_name.upper()}_{i+1:03d}"
-                 case.priority = pattern_data['priority']
-                 case.category = pattern_data['category']
-                 test_cases.append(case)
-
-         return test_cases or [self._create_fallback_case()]
-
-     def _analyze_requirements(self, text: str) -> Dict[str, Any]:
-         """Analyze text to detect requirement patterns"""
-         text_lower = text.lower()
-         detected = {}
-
-         for name, info in REQUIREMENT_PATTERNS.items():
-             matches = []
-             for kw in info['keywords']:
-                 if kw in text_lower:
-                     # Find context around keyword
-                     context = re.findall(rf'.{{0,50}}{re.escape(kw)}.{{0,50}}', text_lower)
-                     matches.extend(context[:3])  # Limit contexts
-
-             if matches:
-                 detected[name] = {
-                     'matches': matches,
-                     'priority': info['priority'],
-                     'category': info['category']
-                 }
-
-         return detected
-
-     def _create_fallback_case(self) -> TestCase:
-         """Create a generic fallback test case"""
-         return TestCase(
-             id="TC_GEN_001",
-             title="General Functionality Test",
-             description="Verify basic system functionality",
-             preconditions=["System is accessible"],
-             steps=["Execute core functionality"],
-             expected="System behaves as expected",
-             postconditions=["Test completed"],
-             test_data="Standard test data",
-             priority="Medium",
-             category="Functional"
          )
-
-     def _truncate_text(self, text: str, max_length: int) -> str:
-         """Safely truncate text to maximum length"""
-         return text[:max_length] + '...' if len(text) > max_length else text
-
-     # Template generators for different test types
-     def generate_security_tests(self, matches: List[str]) -> List[TestCase]:
-         """Generate security-related test cases"""
-         return [
-             TestCase(
-                 id="",
-                 title="Authentication Validation",
-                 description="Verify proper authentication mechanism",
-                 preconditions=["System has authentication configured"],
-                 steps=[
-                     "Attempt login with valid credentials",
-                     "Attempt login with invalid credentials",
-                     "Verify session handling"
-                 ],
-                 expected="Valid login succeeds, invalid fails, sessions secure",
-                 postconditions=["Security verified"],
-                 test_data="Test credentials",
-                 priority="High",
-                 category="Security"
-             ),
-             # Additional security test cases...
          ]
-
-     def generate_validation_tests(self, matches: List[str]) -> List[TestCase]:
-         """Generate data validation test cases"""
-         return [
-             TestCase(
-                 id="",
-                 title="Input Validation",
-                 description="Verify input validation rules",
-                 preconditions=["System accepts user input"],
-                 steps=[
-                     "Enter valid input",
-                     "Enter invalid input",
-                     "Verify system response"
-                 ],
-                 expected="Valid input accepted, invalid rejected with messages",
-                 postconditions=["Validation rules verified"],
-                 test_data="Valid and invalid test data",
-                 priority="High",
-                 category="Functional"
              )
-         ]
-
-     # Additional generator methods for other test types...
-     # generate_performance_tests, generate_api_tests, etc.

- # Singleton instance
- _generator_instance = None

- def get_generator() -> TestCaseGenerator:
-     """Get the singleton generator instance"""
      global _generator_instance
      if _generator_instance is None:
-         _generator_instance = TestCaseGenerator()
      return _generator_instance

- def generate_test_cases(srs_text: str) -> List[Dict]:
-     """Main API function to generate test cases"""
-     generator = get_generator()
-     cases = generator.generate_test_cases(srs_text)
-     return [case.__dict__ for case in cases]
-
- def generate_test_cases_and_info(srs_text: str) -> Dict[str, Any]:
-     """Generate test cases with metadata about generation method"""
-     generator = get_generator()
-     cases = generator.generate_test_cases(srs_text)
-
      return {
-         "model": generator.model_name or "Template-Based",
-         "algorithm": "AI" if generator.model else "Template",
-         "test_cases": [case.__dict__ for case in cases],
-         "memory_usage": f"{psutil.Process().memory_info().rss / (1024 * 1024):.1f}MB"
      }

- # Example usage
- if __name__ == "__main__":
-     sample_reqs = """
-     The system must implement secure user authentication with password hashing.
-     All API endpoints must validate input data and return appropriate error codes.
-     The application should handle 100 concurrent users with response times under 2 seconds.
-     """
-
-     print("Generating test cases...")
-     test_cases = generate_test_cases(sample_reqs)
-     for case in test_cases:
-         print(f"\n{case['id']}: {case['title']}")
-         print(f"Priority: {case['priority']}, Category: {case['category']}")
-         print(f"Steps: {case['steps']}")

  import psutil
  import re
  import gc

+ # Initialize logger
  logger = logging.getLogger(__name__)
+ logging.basicConfig(level=logging.INFO)

+ # List of memory-optimized models
  MEMORY_OPTIMIZED_MODELS = [
+     "gpt2",                            # ~500MB
+     "distilgpt2",                      # ~250MB
+     "microsoft/DialoGPT-small",        # ~250MB
+     "huggingface/CodeBERTa-small-v1",  # Code tasks
  ]

+ # Singleton state
+ _generator_instance = None
+
+ def get_optimal_model_for_memory():
+     """Select the best model based on available memory."""
+     available_memory = psutil.virtual_memory().available / (1024 * 1024)  # MB
+     logger.info(f"Available memory: {available_memory:.1f}MB")
+
+     if available_memory < 300:
+         return None  # Use template fallback
+     elif available_memory < 600:
+         return "microsoft/DialoGPT-small"
+     else:
+         return "distilgpt2"
+
+ def load_model_with_memory_optimization(model_name):
+     """Load model with low memory settings."""
+     try:
+         logger.info(f"Loading {model_name} with memory optimizations...")
+
+         tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='left', use_fast=True)
+
+         if tokenizer.pad_token is None:
+             tokenizer.pad_token = tokenizer.eos_token
+
+         model = AutoModelForCausalLM.from_pretrained(
+             model_name,
+             torch_dtype=torch.float16,
+             device_map="cpu",
+             low_cpu_mem_usage=True,
+             use_cache=False,
          )
+
+         model.eval()
+         model.gradient_checkpointing_enable()
+         logger.info(f"✅ Model {model_name} loaded successfully")
+         return tokenizer, model
+
+     except Exception as e:
+         logger.error(f"❌ Failed to load model {model_name}: {e}")
+         return None, None
+
+ def extract_keywords(text):
+     common_keywords = [
+         'login', 'authentication', 'user', 'password', 'database', 'data',
+         'interface', 'api', 'function', 'feature', 'requirement', 'system',
+         'input', 'output', 'validation', 'error', 'security', 'performance'
+     ]
+     words = re.findall(r'\b\w+\b', text.lower())
+     return [word for word in words if word in common_keywords]
+
+ def generate_template_based_test_cases(srs_text):
+     keywords = extract_keywords(srs_text)
+     test_cases = []
+
+     if any(word in keywords for word in ['login', 'authentication', 'user', 'password']):
+         test_cases.extend([
+             {
+                 "id": "TC_001",
+                 "title": "Valid Login Test",
+                 "description": "Test login with valid credentials",
+                 "steps": ["Enter valid username", "Enter valid password", "Click login"],
+                 "expected": "User should be logged in successfully"
+             },
+             {
+                 "id": "TC_002",
+                 "title": "Invalid Login Test",
+                 "description": "Test login with invalid credentials",
+                 "steps": ["Enter invalid username", "Enter invalid password", "Click login"],
+                 "expected": "Error message should be displayed"
+             }
+         ])
+
+     if any(word in keywords for word in ['database', 'data', 'store', 'save']):
+         test_cases.append({
+             "id": "TC_003",
+             "title": "Data Storage Test",
+             "description": "Test data storage functionality",
+             "steps": ["Enter data", "Save data", "Verify storage"],
+             "expected": "Data should be stored correctly"
+         })
+
+     if not test_cases:
+         test_cases = [
+             {
+                 "id": "TC_001",
+                 "title": "Basic Functionality Test",
+                 "description": "Test basic system functionality",
+                 "steps": ["Access the system", "Perform basic operations", "Verify results"],
+                 "expected": "System should work as expected"
+             }
          ]
+
+     return test_cases
+
+ def parse_generated_test_cases(generated_text):
+     lines = generated_text.split('\n')
+     test_cases = []
+     current_case = {}
+     case_counter = 1
+
+     for line in lines:
+         line = line.strip()
+         if line.startswith(('1.', '2.', '3.', 'TC', 'Test')):
+             if current_case:
+                 test_cases.append(current_case)
+             current_case = {
+                 "id": f"TC_{case_counter:03d}",
+                 "title": line,
+                 "description": line,
+                 "steps": ["Execute the test"],
+                 "expected": "Test should pass"
+             }
+             case_counter += 1
+
+     if current_case:
+         test_cases.append(current_case)
+
+     if not test_cases:
+         return [{
+             "id": "TC_001",
+             "title": "Generated Test Case",
+             "description": "Auto-generated test case based on requirements",
+             "steps": ["Review requirements", "Execute test", "Verify results"],
+             "expected": "Requirements should be met"
+         }]
+
+     return test_cases
+
+ def generate_with_ai_model(srs_text, tokenizer, model):
+     max_input_length = 200
+     if len(srs_text) > max_input_length:
+         srs_text = srs_text[:max_input_length]
+
+     prompt = f"""Generate test cases for this software requirement:
+ {srs_text}
+
+ Test Cases:
+ 1."""
+
+     try:
+         inputs = tokenizer.encode(
+             prompt,
+             return_tensors="pt",
+             max_length=150,
+             truncation=True
+         )
+
+         with torch.no_grad():
+             outputs = model.generate(
+                 inputs,
+                 max_new_tokens=100,
+                 num_return_sequences=1,
+                 temperature=0.7,
+                 do_sample=True,
+                 pad_token_id=tokenizer.eos_token_id,
+                 use_cache=False,
              )

+         generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+         del inputs, outputs
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+         return parse_generated_test_cases(generated_text)

+     except Exception as e:
+         logger.error(f"❌ AI generation failed: {e}")
+         raise
+
+ def generate_with_fallback(srs_text):
+     model_name = get_optimal_model_for_memory()
+
+     if model_name:
+         tokenizer, model = load_model_with_memory_optimization(model_name)
+         if tokenizer and model:
+             try:
+                 test_cases = generate_with_ai_model(srs_text, tokenizer, model)
+                 reason = get_algorithm_reason(model_name)
+                 return test_cases, model_name, "transformer (causal LM)", reason
+             except Exception as e:
+                 logger.warning(f"AI generation failed: {e}, falling back to templates")
+
+     logger.info("⚠️ Using fallback template-based generation")
+     test_cases = generate_template_based_test_cases(srs_text)
+     return test_cases, "Template-Based Generator", "rule-based", "Low memory - fallback to rule-based generation"
+
+ # ✅ Function exposed to app.py
+ def generate_test_cases(srs_text):
+     return generate_with_fallback(srs_text)[0]
+
+ def get_generator():
      global _generator_instance
      if _generator_instance is None:
+         class Generator:
+             def __init__(self):
+                 self.model_name = get_optimal_model_for_memory()
+                 self.tokenizer = None
+                 self.model = None
+                 if self.model_name:
+                     self.tokenizer, self.model = load_model_with_memory_optimization(self.model_name)
+
+             def get_model_info(self):
+                 mem = psutil.Process().memory_info().rss / 1024 / 1024
+                 return {
+                     "model_name": self.model_name if self.model_name else "Template-Based Generator",
+                     "status": "loaded" if self.model else "template_mode",
+                     "memory_usage": f"{mem:.1f}MB",
+                     "optimization": "low_memory"
+                 }
+
+         _generator_instance = Generator()
+
      return _generator_instance

+ def monitor_memory():
+     mem = psutil.Process().memory_info().rss / 1024 / 1024
+     logger.info(f"Memory usage: {mem:.1f}MB")
+     if mem > 450:
+         gc.collect()
+         logger.info("Memory cleanup triggered")
+
+ # ✅ NEW FUNCTION for enhanced output: test cases + model info + reason
+ def generate_test_cases_and_info(input_text):
+     test_cases, model_name, algorithm_used, reason = generate_with_fallback(input_text)
      return {
+         "model": model_name,
+         "algorithm": algorithm_used,
+         "reason": reason,
+         "test_cases": test_cases
      }

+ # Explain why each algorithm is selected
+ def get_algorithm_reason(model_name):
+     if model_name == "microsoft/DialoGPT-small":
+         return "Selected due to low memory availability; DialoGPT-small provides conversational understanding in limited memory environments."
+     elif model_name == "distilgpt2":
+         return "Selected for its balance between performance and low memory usage. Ideal for small environments needing causal language modeling."
+     elif model_name == "gpt2":
+         return "Chosen for general-purpose text generation with moderate memory headroom."
+     elif model_name is None:
+         return "No model used due to insufficient memory. Rule-based template generation chosen instead."
+     else:
+         return "Model selected based on best tradeoff between memory usage and language generation capability."