Commit 72ca5f8 (parent: 2d9a138) — "newwwwww"
Changed files: model/generate.py (+351, −620)

model/generate.py — CHANGED
@@ -5,680 +5,411 @@ import logging
  import psutil
  import re
  import gc
- import
- from

- #
- logger = logging.getLogger(__name__)
  logging.basicConfig(level=logging.INFO)

- #
  MEMORY_OPTIMIZED_MODELS = [
- "
- "
- "
- "huggingface/CodeBERTa-small-v1",  # Code tasks
  ]

- #
-
-
-
  REQUIREMENT_PATTERNS = {
      'authentication': {
          'keywords': ['login', 'authentication', 'signin', 'sign in', 'password', 'username', 'credential', 'auth'],
          'priority': 'High',
-         'category': 'Security'
      },
      'authorization': {
          'keywords': ['permission', 'role', 'access', 'privilege', 'authorize', 'admin', 'user level'],
          'priority': 'High',
-         'category': 'Security'
      },
      'data_validation': {
          'keywords': ['validate', 'validation', 'input', 'format', 'check', 'verify', 'constraint'],
          'priority': 'High',
-         'category': 'Functional'
      },
      'database': {
          'keywords': ['database', 'db', 'store', 'save', 'persist', 'record', 'data storage', 'crud'],
          'priority': 'Medium',
-         'category': '
      },
      'performance': {
          'keywords': ['performance', 'speed', 'time', 'response', 'load', 'concurrent', 'scalability'],
          'priority': 'Medium',
-         'category': 'Performance'
-     },
-     'ui_interface': {
-         'keywords': ['interface', 'ui', 'user interface', 'display', 'screen', 'form', 'button', 'menu'],
-         'priority': 'Medium',
-         'category': 'UI/UX'
      },
      'api': {
          'keywords': ['api', 'endpoint', 'service', 'request', 'response', 'rest', 'http'],
          'priority': 'High',
-         'category': 'Integration'
      },
      'error_handling': {
          'keywords': ['error', 'exception', 'failure', 'invalid', 'incorrect', 'wrong'],
          'priority': 'High',
-         'category': '
-     },
-     'reporting': {
-         'keywords': ['report', 'export', 'generate', 'analytics', 'dashboard', 'chart'],
-         'priority': 'Medium',
-         'category': 'Reporting'
      },
      'security': {
          'keywords': ['security', 'encrypt', 'secure', 'ssl', 'https', 'token', 'session'],
          'priority': 'High',
-         'category': 'Security'
      }
  }

-     """
-     available_memory = psutil.virtual_memory().available / (1024 * 1024)  # MB
-     logger.info(f"Available memory: {available_memory:.1f}MB")
-
-     if available_memory < 300:
-         return None  # Use template fallback
-     elif available_memory < 600:
-         return "microsoft/DialoGPT-small"
-     else:
-         return "distilgpt2"
-
- def load_model_with_memory_optimization(model_name):
-     """Load model with low memory settings."""
-     try:
-         logger.info(f"Loading {model_name} with memory optimizations...")
-
-         tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side='left', use_fast=True)
-
-         if tokenizer.pad_token is None:
-             tokenizer.pad_token = tokenizer.eos_token
-
-         model = AutoModelForCausalLM.from_pretrained(
-             model_name,
-             torch_dtype=torch.float16,
-             device_map="cpu",
-             low_cpu_mem_usage=True,
-             use_cache=False,
-         )
-
-         model.eval()
-         model.gradient_checkpointing_enable()
-         logger.info(f"✅ Model {model_name} loaded successfully")
-         return tokenizer, model
-
-     except Exception as e:
-         logger.error(f"❌ Failed to load model {model_name}: {e}")
-         return None, None
-
- def analyze_requirements(text: str) -> Dict[str, Any]:
-     """Analyze requirements text to identify patterns and generate appropriate test cases"""
-     text_lower = text.lower()
-     detected_patterns = {}

-
-
-
-
-
-             pattern = rf'.{{0,50}}{re.escape(keyword)}.{{0,50}}'
-             context_matches = re.findall(pattern, text_lower, re.IGNORECASE)
-             matches.extend(context_matches)

-
-
-
-
-                 'category': pattern_info['category']
-             }
-
-     return detected_patterns

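The middle of the removed analyze_requirements above is only partially visible in this diff. As a hedged, illustrative sketch (not the original code), the keyword scan with a ±50-character context window that the surviving lines imply could look like the following; the helper name, loop structure, and demo data are assumptions:

import re
from typing import Any, Dict, List

# Hypothetical reconstruction for illustration only; the removed function
# iterated REQUIREMENT_PATTERNS and collected keyword contexts.
def sketch_analyze_requirements(text: str, patterns: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
    text_lower = text.lower()
    detected: Dict[str, Any] = {}
    for name, info in patterns.items():
        matches: List[str] = []
        for keyword in info['keywords']:
            # Grab up to 50 characters of context on either side of the keyword.
            pattern = rf'.{{0,50}}{re.escape(keyword)}.{{0,50}}'
            matches.extend(re.findall(pattern, text_lower, re.IGNORECASE))
        if matches:
            detected[name] = {
                'matches': matches,
                'priority': info['priority'],
                'category': info['category'],
            }
    return detected

# Example: detect the 'authentication' pattern in a one-line requirement.
demo_patterns = {'authentication': {'keywords': ['login'], 'priority': 'High', 'category': 'Security'}}
print(sketch_analyze_requirements("Users must login with a password.", demo_patterns))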
142 |
-
def generate_authentication_tests(matches: List[str]) -> List[Dict]:
|
143 |
-
"""Generate comprehensive authentication test cases"""
|
144 |
-
base_tests = [
|
145 |
-
{
|
146 |
-
"title": "Valid User Login",
|
147 |
-
"description": "Verify that users can successfully log in with valid credentials",
|
148 |
-
"preconditions": ["User account exists", "Application is accessible"],
|
149 |
-
"steps": [
|
150 |
-
"Navigate to login page",
|
151 |
-
"Enter valid username",
|
152 |
-
"Enter valid password",
|
153 |
-
"Click login button"
|
154 |
-
],
|
155 |
-
"expected": "User is successfully authenticated and redirected to dashboard/home page",
|
156 |
-
"postconditions": ["User session is created", "User is logged in"],
|
157 |
-
"test_data": "Valid username: [email protected], Valid password: Test@123"
|
158 |
-
},
|
159 |
-
{
|
160 |
-
"title": "Invalid Username Login",
|
161 |
-
"description": "Verify that login fails with invalid username",
|
162 |
-
"preconditions": ["Application is accessible"],
|
163 |
-
"steps": [
|
164 |
-
"Navigate to login page",
|
165 |
-
"Enter invalid/non-existent username",
|
166 |
-
"Enter valid password format",
|
167 |
-
"Click login button"
|
168 |
-
],
|
169 |
-
"expected": "Login fails with appropriate error message 'Invalid credentials'",
|
170 |
-
"postconditions": ["User remains on login page", "Account security maintained"],
|
171 |
-
"test_data": "Valid username: [email protected], Invalid password: WrongPass123"
|
172 |
-
},
|
173 |
-
{
|
174 |
-
"title": "Empty Fields Login Attempt",
|
175 |
-
"description": "Verify validation when login attempted with empty fields",
|
176 |
-
"preconditions": ["Application is accessible"],
|
177 |
-
"steps": [
|
178 |
-
"Navigate to login page",
|
179 |
-
"Leave username field empty",
|
180 |
-
"Leave password field empty",
|
181 |
-
"Click login button"
|
182 |
-
],
|
183 |
-
"expected": "Validation errors displayed for required fields",
|
184 |
-
"postconditions": ["User remains on login page", "Form validation active"],
|
185 |
-
"test_data": "Username: (empty), Password: (empty)"
|
186 |
-
},
|
187 |
-
{
|
188 |
-
"title": "SQL Injection Attack Prevention",
|
189 |
-
"description": "Verify that login form prevents SQL injection attacks",
|
190 |
-
"preconditions": ["Application is accessible"],
|
191 |
-
"steps": [
|
192 |
-
"Navigate to login page",
|
193 |
-
"Enter SQL injection payload in username field",
|
194 |
-
"Enter any password",
|
195 |
-
"Click login button"
|
196 |
-
],
|
197 |
-
"expected": "Login fails safely without database compromise or error exposure",
|
198 |
-
"postconditions": ["System security maintained", "No unauthorized access"],
|
199 |
-
"test_data": "Username: admin'; DROP TABLE users; --, Password: anypass"
|
200 |
-
}
|
201 |
-
]
|
202 |
-
|
203 |
-
return base_tests
|
204 |
-
|
205 |
-
def generate_data_validation_tests(matches: List[str]) -> List[Dict]:
|
206 |
-
"""Generate comprehensive data validation test cases"""
|
207 |
-
return [
|
208 |
-
{
|
209 |
-
"title": "Valid Data Input Validation",
|
210 |
-
"description": "Verify system accepts valid data formats correctly",
|
211 |
-
"preconditions": ["Form/API endpoint is accessible", "User has appropriate permissions"],
|
212 |
-
"steps": [
|
213 |
-
"Access the input form/endpoint",
|
214 |
-
"Enter data in valid format",
|
215 |
-
"Submit the form/request",
|
216 |
-
"Verify data is accepted"
|
217 |
-
],
|
218 |
-
"expected": "Data is accepted and processed successfully with confirmation message",
|
219 |
-
"postconditions": ["Data is stored correctly", "User receives success feedback"],
|
220 |
-
"test_data": "Valid email: [email protected], Valid phone: +1-234-567-8900"
|
221 |
-
},
|
222 |
-
{
|
223 |
-
"title": "Invalid Data Format Rejection",
|
224 |
-
"description": "Verify system rejects invalid data formats",
|
225 |
-
"preconditions": ["Form/API endpoint is accessible"],
|
226 |
-
"steps": [
|
227 |
-
"Access the input form/endpoint",
|
228 |
-
"Enter data in invalid format",
|
229 |
-
"Submit the form/request",
|
230 |
-
"Verify validation error is shown"
|
231 |
-
],
|
232 |
-
"expected": "System rejects invalid data with clear error message",
|
233 |
-
"postconditions": ["Invalid data is not stored", "User guided to correct format"],
|
234 |
-
"test_data": "Invalid email: notanemail, Invalid phone: 123-abc-defg"
|
235 |
-
},
|
236 |
-
{
|
237 |
-
"title": "Boundary Value Testing",
|
238 |
-
"description": "Test data validation at boundary values",
|
239 |
-
"preconditions": ["System has defined data length/value limits"],
|
240 |
-
"steps": [
|
241 |
-
"Test with minimum allowed value",
|
242 |
-
"Test with maximum allowed value",
|
243 |
-
"Test with value just below minimum",
|
244 |
-
"Test with value just above maximum"
|
245 |
-
],
|
246 |
-
"expected": "Min/max values accepted, out-of-range values rejected appropriately",
|
247 |
-
"postconditions": ["Boundary validation working correctly"],
|
248 |
-
"test_data": "Min: 1, Max: 100, Below: 0, Above: 101"
|
249 |
-
},
|
250 |
-
{
|
251 |
-
"title": "Special Characters Handling",
|
252 |
-
"description": "Verify proper handling of special characters in input",
|
253 |
-
"preconditions": ["Input fields accept text data"],
|
254 |
-
"steps": [
|
255 |
-
"Enter text with special characters (!@#$%^&*)",
|
256 |
-
"Enter text with unicode characters (émañ)",
|
257 |
-
"Enter text with HTML tags (<script>)",
|
258 |
-
"Submit and verify handling"
|
259 |
-
],
|
260 |
-
"expected": "Special characters handled safely without breaking functionality",
|
261 |
-
"postconditions": ["Data integrity maintained", "No XSS vulnerabilities"],
|
262 |
-
"test_data": "Special: Test!@#$, Unicode: Café, HTML: <b>test</b>"
|
263 |
-
}
|
264 |
-
]
|
265 |
-
|
266 |
-
def generate_performance_tests(matches: List[str]) -> List[Dict]:
|
267 |
-
"""Generate comprehensive performance test cases"""
|
268 |
-
return [
|
269 |
-
{
|
270 |
-
"title": "Response Time Under Normal Load",
|
271 |
-
"description": "Verify system response time meets requirements under normal usage",
|
272 |
-
"preconditions": ["System is running in production-like environment", "Normal user load"],
|
273 |
-
"steps": [
|
274 |
-
"Execute typical user operations",
|
275 |
-
"Measure response times for key functions",
|
276 |
-
"Record average response times",
|
277 |
-
"Compare against SLA requirements"
|
278 |
-
],
|
279 |
-
"expected": "All operations complete within specified time limits (e.g., <3 seconds)",
|
280 |
-
"postconditions": ["Performance baseline established"],
|
281 |
-
"test_data": "Target: <3 sec for page loads, <1 sec for API calls"
|
282 |
-
},
|
283 |
-
{
|
284 |
-
"title": "Load Testing with Multiple Users",
|
285 |
-
"description": "Test system performance with concurrent users",
|
286 |
-
"preconditions": ["Load testing tools configured", "Test environment ready"],
|
287 |
-
"steps": [
|
288 |
-
"Simulate 100 concurrent users",
|
289 |
-
"Execute common user workflows",
|
290 |
-
"Monitor system resources (CPU, memory)",
|
291 |
-
"Measure response times and error rates"
|
292 |
-
],
|
293 |
-
"expected": "System maintains acceptable performance with <5% error rate",
|
294 |
-
"postconditions": ["Load capacity documented", "Performance bottlenecks identified"],
|
295 |
-
"test_data": "Concurrent users: 100, Duration: 30 minutes"
|
296 |
-
},
|
297 |
-
{
|
298 |
-
"title": "Memory Usage Optimization",
|
299 |
-
"description": "Verify system memory usage remains within acceptable limits",
|
300 |
-
"preconditions": ["System monitoring tools available"],
|
301 |
-
"steps": [
|
302 |
-
"Monitor memory usage during normal operations",
|
303 |
-
"Execute memory-intensive operations",
|
304 |
-
"Check for memory leaks over extended periods",
|
305 |
-
"Verify garbage collection effectiveness"
|
306 |
-
],
|
307 |
-
"expected": "Memory usage stays within allocated limits, no memory leaks detected",
|
308 |
-
"postconditions": ["Memory optimization verified"],
|
309 |
-
"test_data": "Memory limit: 512MB, Test duration: 2 hours"
|
310 |
-
}
|
311 |
-
]
|
312 |
-
|
313 |
-
def generate_api_tests(matches: List[str]) -> List[Dict]:
|
314 |
-
"""Generate comprehensive API test cases"""
|
315 |
-
return [
|
316 |
-
{
|
317 |
-
"title": "Valid API Request Processing",
|
318 |
-
"description": "Verify API correctly processes valid requests",
|
319 |
-
"preconditions": ["API endpoint is accessible", "Valid authentication token available"],
|
320 |
-
"steps": [
|
321 |
-
"Send GET/POST request with valid parameters",
|
322 |
-
"Include proper authentication headers",
|
323 |
-
"Verify response status code",
|
324 |
-
"Validate response data structure"
|
325 |
-
],
|
326 |
-
"expected": "API returns 200 OK with expected data format",
|
327 |
-
"postconditions": ["Request logged", "Data processed correctly"],
|
328 |
-
"test_data": "Endpoint: /api/users, Method: GET, Auth: Bearer token123"
|
329 |
-
},
|
330 |
-
{
|
331 |
-
"title": "Invalid API Request Handling",
|
332 |
-
"description": "Verify API properly handles invalid requests",
|
333 |
-
"preconditions": ["API endpoint is accessible"],
|
334 |
-
"steps": [
|
335 |
-
"Send request with invalid parameters",
|
336 |
-
"Send request with missing required fields",
|
337 |
-
"Send malformed JSON in request body",
|
338 |
-
"Verify error responses"
|
339 |
-
],
|
340 |
-
"expected": "API returns appropriate error codes (400, 422) with descriptive messages",
|
341 |
-
"postconditions": ["Errors logged appropriately", "System remains stable"],
|
342 |
-
"test_data": "Invalid param: user_id='invalid', Missing: required field 'name'"
|
343 |
-
},
|
344 |
-
{
|
345 |
-
"title": "API Authentication and Authorization",
|
346 |
-
"description": "Test API security and access controls",
|
347 |
-
"preconditions": ["API requires authentication"],
|
348 |
-
"steps": [
|
349 |
-
"Send request without authentication token",
|
350 |
-
"Send request with invalid/expired token",
|
351 |
-
"Send request with valid token but insufficient permissions",
|
352 |
-
"Verify security responses"
|
353 |
-
],
|
354 |
-
"expected": "Unauthorized requests return 401/403 with security maintained",
|
355 |
-
"postconditions": ["Security audit trail created"],
|
356 |
-
"test_data": "Valid token: Bearer abc123, Invalid: Bearer expired456"
|
357 |
-
}
|
358 |
-
]
|
359 |
-
|
360 |
-
def generate_error_handling_tests(matches: List[str]) -> List[Dict]:
|
361 |
-
"""Generate comprehensive error handling test cases"""
|
362 |
-
return [
|
363 |
-
{
|
364 |
-
"title": "Graceful Error Message Display",
|
365 |
-
"description": "Verify system displays user-friendly error messages",
|
366 |
-
"preconditions": ["Error conditions can be triggered"],
|
367 |
-
"steps": [
|
368 |
-
"Trigger various error conditions",
|
369 |
-
"Verify error messages are displayed",
|
370 |
-
"Check that messages are user-friendly",
|
371 |
-
"Ensure no technical details exposed"
|
372 |
-
],
|
373 |
-
"expected": "Clear, helpful error messages shown without exposing system internals",
|
374 |
-
"postconditions": ["User experience maintained during errors"],
|
375 |
-
"test_data": "Error scenarios: network timeout, invalid input, server error"
|
376 |
-
},
|
377 |
-
{
|
378 |
-
"title": "System Recovery After Errors",
|
379 |
-
"description": "Test system's ability to recover from error states",
|
380 |
-
"preconditions": ["System can be put into error state"],
|
381 |
-
"steps": [
|
382 |
-
"Trigger system error condition",
|
383 |
-
"Verify error is handled gracefully",
|
384 |
-
"Attempt normal operations after error",
|
385 |
-
"Verify system functionality restored"
|
386 |
-
],
|
387 |
-
"expected": "System recovers fully and continues normal operation",
|
388 |
-
"postconditions": ["System stability maintained", "No data corruption"],
|
389 |
-
"test_data": "Recovery scenarios: database disconnect, memory overflow"
|
390 |
-
}
|
391 |
-
]
|
392 |
-
|
393 |
-
def generate_template_based_test_cases(srs_text: str) -> List[Dict]:
|
394 |
-
"""Generate comprehensive template-based test cases using pattern analysis"""
|
395 |
-
detected_patterns = analyze_requirements(srs_text)
|
396 |
-
all_test_cases = []
|
397 |
-
|
398 |
-
# Generate specific test cases based on detected patterns
|
399 |
-
for pattern_name, pattern_data in detected_patterns.items():
|
400 |
-
if pattern_name == 'authentication':
|
401 |
-
tests = generate_authentication_tests(pattern_data['matches'])
|
402 |
-
elif pattern_name == 'data_validation':
|
403 |
-
tests = generate_data_validation_tests(pattern_data['matches'])
|
404 |
-
elif pattern_name == 'performance':
|
405 |
-
tests = generate_performance_tests(pattern_data['matches'])
|
406 |
-
elif pattern_name == 'api':
|
407 |
-
tests = generate_api_tests(pattern_data['matches'])
|
408 |
-
elif pattern_name == 'error_handling':
|
409 |
-
tests = generate_error_handling_tests(pattern_data['matches'])
|
410 |
-
else:
|
411 |
-
# Generate generic tests for other patterns
|
412 |
-
tests = generate_generic_tests(pattern_name, pattern_data)
|

-
-
-
-             test['priority'] = pattern_data['priority']
-             test['category'] = pattern_data['category']

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-     "
-
-
-
-
-
-
-             "postconditions": ["System state is valid"],
-             "test_data": "Valid test data as per requirements"
-         },
-         {
-             "title": f"{pattern_name.replace('_', ' ').title()} - Negative Test",
-             "description": f"Verify {pattern_name.replace('_', ' ')} handles invalid scenarios",
-             "preconditions": ["System is accessible"],
-             "steps": [
-                 f"Access {pattern_name.replace('_', ' ')} feature",
-                 "Perform invalid operation",
-                 "Verify error handling"
-             ],
-             "expected": f"Invalid {pattern_name.replace('_', ' ')} operation handled gracefully",
-             "postconditions": ["System remains stable"],
-             "test_data": "Invalid test data to trigger error conditions"
-         }
-     ]
-
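The generate_generic_tests helper that the removed loop above dispatches to is not recoverable from this diff. A rough, hedged sketch consistent with the surviving positive/negative dictionaries — every name here is hypothetical — might be:

from typing import Dict, List

# Illustrative sketch only; the removed implementation may have differed.
def sketch_generate_generic_tests(pattern_name: str, pattern_data: Dict) -> List[Dict]:
    # pattern_data is accepted only to mirror the removed call site.
    feature = pattern_name.replace('_', ' ')
    return [
        {
            "title": f"{feature.title()} - Positive Test",
            "description": f"Verify {feature} works for valid scenarios",
            "preconditions": ["System is accessible"],
            "steps": [f"Access {feature} feature", "Perform a valid operation", "Verify the result"],
            "expected": f"Valid {feature} operation completes successfully",
            "postconditions": ["System state is valid"],
            "test_data": "Valid test data as per requirements",
        },
        {
            "title": f"{feature.title()} - Negative Test",
            "description": f"Verify {feature} handles invalid scenarios",
            "preconditions": ["System is accessible"],
            "steps": [f"Access {feature} feature", "Perform invalid operation", "Verify error handling"],
            "expected": f"Invalid {feature} operation handled gracefully",
            "postconditions": ["System remains stable"],
            "test_data": "Invalid test data to trigger error conditions",
        },
    ]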
460 |
-
def generate_generic_functional_tests(srs_text: str) -> List[Dict]:
|
461 |
-
"""Generate generic functional test cases when no specific patterns are detected"""
|
462 |
-
return [
|
463 |
-
{
|
464 |
-
"id": "TC_FUNC_001",
|
465 |
-
"title": "Basic System Functionality",
|
466 |
-
"priority": "High",
|
467 |
-
"category": "Functional",
|
468 |
-
"description": "Verify core system functionality works as specified",
|
469 |
-
"preconditions": ["System is deployed and accessible", "Test environment is configured"],
|
470 |
-
"steps": [
|
471 |
-
"Access the system/application",
|
472 |
-
"Navigate through main features",
|
473 |
-
"Execute primary use cases",
|
474 |
-
"Verify all functions work correctly"
|
475 |
-
],
|
476 |
-
"expected": "All core functionality operates according to requirements",
|
477 |
-
"postconditions": ["System demonstrates full functionality"],
|
478 |
-
"test_data": "Standard test data set as defined in requirements"
|
479 |
-
},
|
480 |
-
{
|
481 |
-
"id": "TC_FUNC_002",
|
482 |
-
"title": "Input Validation and Processing",
|
483 |
-
"priority": "High",
|
484 |
-
"category": "Functional",
|
485 |
-
"description": "Test system's ability to validate and process various inputs",
|
486 |
-
"preconditions": ["System accepts user input"],
|
487 |
-
"steps": [
|
488 |
-
"Enter valid data in all input fields",
|
489 |
-
"Submit data and verify processing",
|
490 |
-
"Enter invalid data and verify rejection",
|
491 |
-
"Test boundary conditions"
|
492 |
-
],
|
493 |
-
"expected": "Valid data processed correctly, invalid data rejected with appropriate messages",
|
494 |
-
"postconditions": ["Data integrity maintained"],
|
495 |
-
"test_data": "Mix of valid, invalid, and boundary test data"
|
496 |
-
},
|
497 |
-
{
|
498 |
-
"id": "TC_FUNC_003",
|
499 |
-
"title": "System Integration and Workflow",
|
500 |
-
"priority": "Medium",
|
501 |
-
"category": "Integration",
|
502 |
-
"description": "Verify end-to-end workflow and system integration",
|
503 |
-
"preconditions": ["All system components are integrated"],
|
504 |
-
"steps": [
|
505 |
-
"Execute complete business workflow",
|
506 |
-
"Verify data flow between components",
|
507 |
-
"Test system integration points",
|
508 |
-
"Validate end-to-end functionality"
|
509 |
-
],
|
510 |
-
"expected": "Complete workflow executes successfully with proper data flow",
|
511 |
-
"postconditions": ["Workflow completion confirmed"],
|
512 |
-
"test_data": "Complete dataset for end-to-end testing"
|
513 |
-
}
|
514 |
-
]
|
515 |
-
|
516 |
-
def parse_generated_test_cases(generated_text: str) -> List[Dict]:
|
517 |
-
"""Parse AI-generated text into structured test cases"""
|
518 |
-
lines = generated_text.split('\n')
|
519 |
-
test_cases = []
|
520 |
-
current_case = {}
|
521 |
-
case_counter = 1
|
522 |
-
|
523 |
-
for line in lines:
|
524 |
-
line = line.strip()
|
525 |
-
if line.startswith(('1.', '2.', '3.', 'TC', 'Test')):
|
526 |
-
if current_case:
|
527 |
-
test_cases.append(current_case)
|
528 |
-
current_case = {
|
529 |
-
"id": f"TC_AI_{case_counter:03d}",
|
530 |
-
"title": line,
|
531 |
-
"priority": "Medium",
|
532 |
-
"category": "Functional",
|
533 |
-
"description": line,
|
534 |
-
"preconditions": ["System is accessible"],
|
535 |
-
"steps": ["Execute the test procedure"],
|
536 |
-
"expected": "Test should pass according to requirements",
|
537 |
-
"postconditions": ["System state verified"],
|
538 |
-
"test_data": "As specified in requirements"
|
539 |
-
}
|
540 |
-
case_counter += 1
|
541 |
-
|
542 |
-
if current_case:
|
543 |
-
test_cases.append(current_case)
|
544 |
-
|
545 |
-
if not test_cases:
|
546 |
-
return [{
|
547 |
-
"id": "TC_AI_001",
|
548 |
-
"title": "AI Generated Test Case",
|
549 |
-
"priority": "Medium",
|
550 |
-
"category": "Functional",
|
551 |
-
"description": "Auto-generated test case based on AI analysis",
|
552 |
-
"preconditions": ["System meets specified requirements"],
|
553 |
-
"steps": ["Review requirements", "Execute test procedure", "Verify results"],
|
554 |
-
"expected": "Requirements should be met as specified",
|
555 |
-
"postconditions": ["Test completion verified"],
|
556 |
-
"test_data": "Test data as defined in requirements"
|
557 |
-
}]
|
558 |
-
|
559 |
-
return test_cases
|
560 |
-
|
561 |
-
def generate_with_ai_model(srs_text: str, tokenizer, model) -> List[Dict]:
|
562 |
-
"""Generate test cases using AI model"""
|
563 |
-
max_input_length = 300
|
564 |
-
if len(srs_text) > max_input_length:
|
565 |
-
srs_text = srs_text[:max_input_length]
|
566 |
-
|
567 |
-
prompt = f"""Generate comprehensive test cases for this software requirement:
|
568 |
-
{srs_text}
|
569 |
-
|
570 |
-
Test Cases:
|
571 |
-
1."""
|
572 |
-
|
573 |
-
try:
|
574 |
-
inputs = tokenizer.encode(
|
575 |
-
prompt,
|
576 |
-
return_tensors="pt",
|
577 |
-
max_length=200,
|
578 |
-
truncation=True
|
579 |
-
)
|
580 |
-
|
581 |
-
with torch.no_grad():
|
582 |
-
outputs = model.generate(
|
583 |
-
inputs,
|
584 |
-
max_new_tokens=150,
|
585 |
-
num_return_sequences=1,
|
586 |
-
temperature=0.7,
|
587 |
-
do_sample=True,
|
588 |
-
pad_token_id=tokenizer.eos_token_id,
|
589 |
-
use_cache=False,
|
590 |
)
|
591 |
-
|
592 |
-
|
593 |
-
|
594 |
-
|
595 |
-
|
596 |
-
|
597 |
-
|
598 |
-
|
599 |
-
|
600 |
-
|
601 |
-
|
602 |
-
|
603 |
-
|
604 |
-
|
605 |
-
|
606 |
-
|
607 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
      try:
-
-
-
      except Exception as e:
-         logger.warning(f"AI generation failed: {e}, falling back to
-
-
-
-

- #
-
-     """Main function to generate test cases"""
-     return generate_with_fallback(srs_text)[0]

- def get_generator():
-     """Get generator instance"""
      global _generator_instance
      if _generator_instance is None:
-
-         def __init__(self):
-             self.model_name = get_optimal_model_for_memory()
-             self.tokenizer = None
-             self.model = None
-             if self.model_name:
-                 self.tokenizer, self.model = load_model_with_memory_optimization(self.model_name)
-
-         def get_model_info(self):
-             mem = psutil.Process().memory_info().rss / 1024 / 1024
-             return {
-                 "model_name": self.model_name if self.model_name else "Enhanced Template-Based Generator",
-                 "status": "loaded" if self.model else "enhanced_template_mode",
-                 "memory_usage": f"{mem:.1f}MB",
-                 "optimization": "low_memory_enhanced"
-             }
-
-         _generator_instance = Generator()
-
      return _generator_instance

- def
- """
-
-
-
-
-
-
-
-
      return {
-         "model": model_name,
-         "algorithm":
-         "
-         "
      }

-
-
-
-
-
-
-
-
-
-
-
-
-
-     else:
-         return ("Model selected based on optimal tradeoff between memory usage, language generation capability, "
-                 "and test case quality requirements.")
-
  import psutil
  import re
  import gc
+ from typing import List, Dict, Any, Optional, Tuple
+ from dataclasses import dataclass

+ # Configure logging
  logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)

+ # Constants
  MEMORY_OPTIMIZED_MODELS = [
+     "distilgpt2",                # ~250MB - default first choice
+     "microsoft/DialoGPT-small",  # ~250MB - good for conversational context
+     "gpt2",                      # ~500MB - fallback if more memory available
  ]

+ MIN_MEMORY_FOR_MODEL = 500  # Minimum MB required to attempt model loading
+ MAX_TEST_CASES = 15  # Maximum number of test cases to return
+
+ @dataclass
+ class TestCase:
+     id: str
+     title: str
+     description: str
+     preconditions: List[str]
+     steps: List[str]
+     expected: str
+     postconditions: List[str]
+     test_data: str
+     priority: str = "Medium"
+     category: str = "Functional"
+
+ # Enhanced pattern matching with more categories
  REQUIREMENT_PATTERNS = {
      'authentication': {
          'keywords': ['login', 'authentication', 'signin', 'sign in', 'password', 'username', 'credential', 'auth'],
          'priority': 'High',
+         'category': 'Security',
+         'generator': 'generate_security_tests'
      },
      'authorization': {
          'keywords': ['permission', 'role', 'access', 'privilege', 'authorize', 'admin', 'user level'],
          'priority': 'High',
+         'category': 'Security',
+         'generator': 'generate_security_tests'
      },
      'data_validation': {
          'keywords': ['validate', 'validation', 'input', 'format', 'check', 'verify', 'constraint'],
          'priority': 'High',
+         'category': 'Functional',
+         'generator': 'generate_validation_tests'
      },
      'database': {
          'keywords': ['database', 'db', 'store', 'save', 'persist', 'record', 'data storage', 'crud'],
          'priority': 'Medium',
+         'category': 'Data',
+         'generator': 'generate_data_tests'
      },
      'performance': {
          'keywords': ['performance', 'speed', 'time', 'response', 'load', 'concurrent', 'scalability'],
          'priority': 'Medium',
+         'category': 'Performance',
+         'generator': 'generate_performance_tests'
      },
      'api': {
          'keywords': ['api', 'endpoint', 'service', 'request', 'response', 'rest', 'http'],
          'priority': 'High',
+         'category': 'Integration',
+         'generator': 'generate_api_tests'
      },
      'error_handling': {
          'keywords': ['error', 'exception', 'failure', 'invalid', 'incorrect', 'wrong'],
          'priority': 'High',
+         'category': 'Reliability',
+         'generator': 'generate_error_tests'
      },
      'security': {
          'keywords': ['security', 'encrypt', 'secure', 'ssl', 'https', 'token', 'session'],
          'priority': 'High',
+         'category': 'Security',
+         'generator': 'generate_security_tests'
      }
  }

+ class TestCaseGenerator:
+     """Main class for generating test cases with AI and template fallback"""

+     def __init__(self):
+         self.model_name = None
+         self.tokenizer = None
+         self.model = None
+         self._initialize_model()

+     def _initialize_model(self):
+         """Initialize the optimal model based on available memory"""
+         available_mem = psutil.virtual_memory().available / (1024 * 1024)
+         logger.info(f"Available memory: {available_mem:.1f}MB")

+         if available_mem < MIN_MEMORY_FOR_MODEL:
+             logger.warning("Insufficient memory for model loading, using template fallback")
+             return

+         # Try models in order of preference
+         for model_name in MEMORY_OPTIMIZED_MODELS:
+             try:
+                 self.tokenizer, self.model = self._load_model_safely(model_name)
+                 if self.model:
+                     self.model_name = model_name
+                     logger.info(f"Successfully loaded model: {model_name}")
+                     break
+             except Exception as e:
+                 logger.warning(f"Failed to load {model_name}: {str(e)}")
+                 continue
+
+     def _load_model_safely(self, model_name: str) -> Tuple[Optional[AutoTokenizer], Optional[AutoModelForCausalLM]]:
+         """Safely load model with memory optimizations"""
+         try:
+             logger.info(f"Attempting to load {model_name}")
+
+             # Load tokenizer first
+             tokenizer = AutoTokenizer.from_pretrained(
+                 model_name,
+                 padding_side='left',
+                 use_fast=True
130 |
)
|
131 |
+
|
132 |
+
# Ensure pad token is set
|
133 |
+
if tokenizer.pad_token is None:
|
134 |
+
tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token else '[PAD]'
|
135 |
+
|
136 |
+
# Load model with optimized settings
|
137 |
+
model = AutoModelForCausalLM.from_pretrained(
|
138 |
+
model_name,
|
139 |
+
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
|
140 |
+
low_cpu_mem_usage=True,
|
141 |
+
device_map="auto" if torch.cuda.is_available() else None
|
142 |
+
)
|
143 |
+
|
144 |
+
# Explicitly move to CPU if needed
|
145 |
+
if not torch.cuda.is_available():
|
146 |
+
model = model.to('cpu')
|
147 |
+
|
148 |
+
model.eval()
|
149 |
+
return tokenizer, model
|
150 |
+
|
151 |
+
except Exception as e:
|
152 |
+
logger.error(f"Error loading model {model_name}: {str(e)}")
|
153 |
+
# Clean up if partial load occurred
|
154 |
+
if 'tokenizer' in locals():
|
155 |
+
del tokenizer
|
156 |
+
if 'model' in locals() and model:
|
157 |
+
del model
|
158 |
+
gc.collect()
|
159 |
+
if torch.cuda.is_available():
|
160 |
+
torch.cuda.empty_cache()
|
161 |
+
return None, None
|
162 |
+
|
163 |
+
def generate_test_cases(self, srs_text: str) -> List[TestCase]:
|
164 |
+
"""Generate test cases using best available method"""
|
165 |
+
# First try AI generation if model is available
|
166 |
+
if self.model and self.tokenizer:
|
167 |
try:
|
168 |
+
ai_cases = self._generate_with_ai(srs_text)
|
169 |
+
if ai_cases:
|
170 |
+
logger.info("Successfully generated test cases with AI")
|
171 |
+
return ai_cases[:MAX_TEST_CASES]
|
172 |
except Exception as e:
|
173 |
+
logger.warning(f"AI generation failed: {str(e)}, falling back to templates")
|
174 |
+
|
175 |
+
# Fall back to template-based generation
|
176 |
+
return self._generate_with_templates(srs_text)[:MAX_TEST_CASES]
|
177 |
+
|
178 |
+
def _generate_with_ai(self, srs_text: str) -> List[TestCase]:
|
179 |
+
"""Generate test cases using AI model"""
|
180 |
+
max_input_length = 500 # Increased from 300 for better context
|
181 |
+
prompt = f"""Generate comprehensive test cases for these software requirements:
|
182 |
+
{self._truncate_text(srs_text, max_input_length)}
|
183 |
+
|
184 |
+
Provide test cases in this format:
|
185 |
+
1. [Test Case Title]
|
186 |
+
- Description: [description]
|
187 |
+
- Steps: [step1; step2; step3]
|
188 |
+
- Expected: [expected result]
|
189 |
+
|
190 |
+
2. [Next Test Case Title]..."""
|
191 |
+
|
192 |
+
try:
|
193 |
+
inputs = self.tokenizer(
|
194 |
+
prompt,
|
195 |
+
return_tensors="pt",
|
196 |
+
max_length=512,
|
197 |
+
truncation=True,
|
198 |
+
padding=True,
|
199 |
+
return_attention_mask=True
|
200 |
+
)
|
201 |
+
|
202 |
+
# Generate with more controlled parameters
|
203 |
+
with torch.no_grad():
|
204 |
+
outputs = self.model.generate(
|
205 |
+
input_ids=inputs['input_ids'],
|
206 |
+
attention_mask=inputs['attention_mask'],
|
207 |
+
max_new_tokens=300,
|
208 |
+
num_return_sequences=1,
|
209 |
+
temperature=0.7,
|
210 |
+
top_p=0.9,
|
211 |
+
do_sample=True,
|
212 |
+
pad_token_id=self.tokenizer.pad_token_id,
|
213 |
+
eos_token_id=self.tokenizer.eos_token_id
|
214 |
+
)
|
215 |
+
|
216 |
+
generated = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
|
217 |
+
return self._parse_ai_output(generated)
|
218 |
+
|
219 |
+
except Exception as e:
|
220 |
+
logger.error(f"AI generation error: {str(e)}")
|
221 |
+
raise
|
222 |
+
finally:
|
223 |
+
# Clean up
|
224 |
+
if 'inputs' in locals():
|
225 |
+
del inputs
|
226 |
+
if 'outputs' in locals():
|
227 |
+
del outputs
|
228 |
+
gc.collect()
|
229 |
+
if torch.cuda.is_available():
|
230 |
+
torch.cuda.empty_cache()
|
231 |
+
|
232 |
+
def _parse_ai_output(self, text: str) -> List[TestCase]:
|
233 |
+
"""Parse AI-generated text into structured test cases"""
|
234 |
+
cases = []
|
235 |
+
current_case = None
|
236 |
+
|
237 |
+
for line in text.split('\n'):
|
238 |
+
line = line.strip()
|
239 |
+
if line.startswith(('1.', '2.', '3.', '4.', '5.', '6.', '7.', '8.', '9.')):
|
240 |
+
if current_case:
|
241 |
+
cases.append(current_case)
|
242 |
+
title = line[2:].strip()
|
243 |
+
current_case = TestCase(
|
244 |
+
id=f"TC_AI_{len(cases)+1:03d}",
|
245 |
+
title=title,
|
246 |
+
description="",
|
247 |
+
preconditions=["System is accessible"],
|
248 |
+
steps=[],
|
249 |
+
expected="",
|
250 |
+
postconditions=["Test executed"],
|
251 |
+
test_data="As specified in requirements",
|
252 |
+
priority="Medium",
|
253 |
+
category="Functional"
|
254 |
+
)
|
255 |
+
elif line.lower().startswith('description:') and current_case:
|
256 |
+
current_case.description = line[12:].strip()
|
257 |
+
elif line.lower().startswith('steps:') and current_case:
|
258 |
+
steps = line[6:].strip().split(';')
|
259 |
+
current_case.steps = [s.strip() for s in steps if s.strip()]
|
260 |
+
elif line.lower().startswith('expected:') and current_case:
|
261 |
+
current_case.expected = line[9:].strip()
|
262 |
+
|
263 |
+
if current_case:
|
264 |
+
cases.append(current_case)
|
265 |
+
|
266 |
+
return cases or [self._create_fallback_case()]
|
267 |
+
|
268 |
+
def _generate_with_templates(self, srs_text: str) -> List[TestCase]:
|
269 |
+
"""Generate test cases using pattern matching and templates"""
|
270 |
+
patterns = self._analyze_requirements(srs_text)
|
271 |
+
test_cases = []
|
272 |
+
|
273 |
+
for pattern_name, pattern_data in patterns.items():
|
274 |
+
generator_name = REQUIREMENT_PATTERNS[pattern_name]['generator']
|
275 |
+
generator = getattr(self, generator_name, self._generate_generic_tests)
|
276 |
+
cases = generator(pattern_data['matches'])
|
277 |
+
|
278 |
+
for i, case in enumerate(cases):
|
279 |
+
case.id = f"TC_{pattern_name.upper()}_{i+1:03d}"
|
280 |
+
case.priority = pattern_data['priority']
|
281 |
+
case.category = pattern_data['category']
|
282 |
+
test_cases.append(case)
|
283 |
+
|
284 |
+
return test_cases or [self._create_fallback_case()]
|
285 |
+
|
286 |
+
def _analyze_requirements(self, text: str) -> Dict[str, Any]:
|
287 |
+
"""Analyze text to detect requirement patterns"""
|
288 |
+
text_lower = text.lower()
|
289 |
+
detected = {}
|
290 |
+
|
291 |
+
for name, info in REQUIREMENT_PATTERNS.items():
|
292 |
+
matches = []
|
293 |
+
for kw in info['keywords']:
|
294 |
+
if kw in text_lower:
|
295 |
+
# Find context around keyword
|
296 |
+
context = re.findall(rf'.{{0,50}}{re.escape(kw)}.{{0,50}}', text_lower)
|
297 |
+
matches.extend(context[:3]) # Limit contexts
|
298 |
+
|
299 |
+
if matches:
|
300 |
+
detected[name] = {
|
301 |
+
'matches': matches,
|
302 |
+
'priority': info['priority'],
|
303 |
+
'category': info['category']
|
304 |
+
}
|
305 |
+
|
306 |
+
return detected
|
307 |
+
|
308 |
+
def _create_fallback_case(self) -> TestCase:
|
309 |
+
"""Create a generic fallback test case"""
|
310 |
+
return TestCase(
|
311 |
+
id="TC_GEN_001",
|
312 |
+
title="General Functionality Test",
|
313 |
+
description="Verify basic system functionality",
|
314 |
+
preconditions=["System is accessible"],
|
315 |
+
steps=["Execute core functionality"],
|
316 |
+
expected="System behaves as expected",
|
317 |
+
postconditions=["Test completed"],
|
318 |
+
test_data="Standard test data",
|
319 |
+
priority="Medium",
|
320 |
+
category="Functional"
|
321 |
+
)
|
322 |
+
|
323 |
+
def _truncate_text(self, text: str, max_length: int) -> str:
|
324 |
+
"""Safely truncate text to maximum length"""
|
325 |
+
return text[:max_length] + '...' if len(text) > max_length else text
|
326 |
+
|
327 |
+
# Template generators for different test types
|
328 |
+
def generate_security_tests(self, matches: List[str]) -> List[TestCase]:
|
329 |
+
"""Generate security-related test cases"""
|
330 |
+
return [
|
331 |
+
TestCase(
|
332 |
+
id="",
|
333 |
+
title="Authentication Validation",
|
334 |
+
description="Verify proper authentication mechanism",
|
335 |
+
preconditions=["System has authentication configured"],
|
336 |
+
steps=[
|
337 |
+
"Attempt login with valid credentials",
|
338 |
+
"Attempt login with invalid credentials",
|
339 |
+
"Verify session handling"
|
340 |
+
],
|
341 |
+
expected="Valid login succeeds, invalid fails, sessions secure",
|
342 |
+
postconditions=["Security verified"],
|
343 |
+
test_data="Test credentials",
|
344 |
+
priority="High",
|
345 |
+
category="Security"
|
346 |
+
),
|
347 |
+
# Additional security test cases...
|
348 |
+
]
|
349 |
+
|
350 |
+
def generate_validation_tests(self, matches: List[str]) -> List[TestCase]:
|
351 |
+
"""Generate data validation test cases"""
|
352 |
+
return [
|
353 |
+
TestCase(
|
354 |
+
id="",
|
355 |
+
title="Input Validation",
|
356 |
+
description="Verify input validation rules",
|
357 |
+
preconditions=["System accepts user input"],
|
358 |
+
steps=[
|
359 |
+
"Enter valid input",
|
360 |
+
"Enter invalid input",
|
361 |
+
"Verify system response"
|
362 |
+
],
|
363 |
+
expected="Valid input accepted, invalid rejected with messages",
|
364 |
+
postconditions=["Validation rules verified"],
|
365 |
+
test_data="Valid and invalid test data",
|
366 |
+
priority="High",
|
367 |
+
category="Functional"
|
368 |
+
)
|
369 |
+
]
|
370 |
+
|
371 |
+
# Additional generator methods for other test types...
|
372 |
+
# generate_performance_tests, generate_api_tests, etc.
|
373 |
|
374 |
+
# Singleton instance
|
375 |
+
_generator_instance = None
|
|
|
|
|
376 |
|
377 |
+
def get_generator() -> TestCaseGenerator:
|
378 |
+
"""Get the singleton generator instance"""
|
379 |
global _generator_instance
|
380 |
if _generator_instance is None:
|
381 |
+
_generator_instance = TestCaseGenerator()
|
|
|
382 |
return _generator_instance
|
383 |
|
384 |
+
def generate_test_cases(srs_text: str) -> List[Dict]:
|
385 |
+
"""Main API function to generate test cases"""
|
386 |
+
generator = get_generator()
|
387 |
+
cases = generator.generate_test_cases(srs_text)
|
388 |
+
return [case.__dict__ for case in cases]
|
389 |
+
|
390 |
+
def generate_test_cases_and_info(srs_text: str) -> Dict[str, Any]:
|
391 |
+
"""Generate test cases with metadata about generation method"""
|
392 |
+
generator = get_generator()
|
393 |
+
cases = generator.generate_test_cases(srs_text)
|
394 |
+
|
395 |
return {
|
396 |
+
"model": generator.model_name or "Template-Based",
|
397 |
+
"algorithm": "AI" if generator.model else "Template",
|
398 |
+
"test_cases": [case.__dict__ for case in cases],
|
399 |
+
"memory_usage": f"{psutil.Process().memory_info().rss / (1024 * 1024):.1f}MB"
|
400 |
}
|
401 |
|
402 |
+
# Example usage
|
403 |
+
if __name__ == "__main__":
|
404 |
+
sample_reqs = """
|
405 |
+
The system must implement secure user authentication with password hashing.
|
406 |
+
All API endpoints must validate input data and return appropriate error codes.
|
407 |
+
The application should handle 100 concurrent users with response times under 2 seconds.
|
408 |
+
"""
|
409 |
+
|
410 |
+
print("Generating test cases...")
|
411 |
+
test_cases = generate_test_cases(sample_reqs)
|
412 |
+
for case in test_cases:
|
413 |
+
print(f"\n{case['id']}: {case['title']}")
|
414 |
+
print(f"Priority: {case['priority']}, Category: {case['category']}")
|
415 |
+
print(f"Steps: {case['steps']}")
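A hedged usage sketch (not part of this commit) of how a caller such as the Space's app might consume the public API added above; the import path assumes model/generate.py is importable as model.generate, and the report format is an illustration:

# Assumed import path; adjust to how the Space packages model/generate.py.
from model.generate import generate_test_cases_and_info

def build_report(srs_text: str) -> str:
    # generate_test_cases_and_info returns model/algorithm metadata plus the
    # test case dicts produced by TestCaseGenerator (AI or template fallback).
    result = generate_test_cases_and_info(srs_text)
    lines = [
        f"Generator: {result['model']} ({result['algorithm']})",
        f"Memory usage: {result['memory_usage']}",
        "",
    ]
    for case in result["test_cases"]:
        lines.append(f"{case['id']} [{case['priority']}/{case['category']}] {case['title']}")
        for step in case["steps"]:
            lines.append(f"  - {step}")
        lines.append(f"  Expected: {case['expected']}")
    return "\n".join(lines)

if __name__ == "__main__":
    print(build_report("The system must validate all API requests and log errors."))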