# NOTE(review): removed Hugging Face Space page chrome that was scraped into
# this file ("Spaces / Sleeping" status, file size, commit hashes, and a
# line-number gutter) -- it was not Python and broke the module.
import logging
import json
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
import gradio as gr
# Configure logging.
# DEBUG level so that model-loading attempts and the full generated text
# (logged via logger.debug below) are visible while the Space runs.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')
# Module-level logger shared by all functions in this file.
logger = logging.getLogger(__name__)
# Try multiple models in succession.
# Ordered by preference: load_model() walks this list top to bottom and
# returns a pipeline for the first model that loads successfully.
MODELS_TO_TRY = [
    "google/flan-t5-large",    # More capable than base
    "google/flan-t5-xl",       # Even more capable
    "facebook/bart-large-cnn", # Alternative model
    "t5-large"                 # Fallback T5 model
]
def load_model():
    """Load the first working model from MODELS_TO_TRY as a generation pipeline.

    Walks the candidate list in order; for each model it loads the weights and
    tokenizer and wraps them in a ``text2text-generation`` pipeline.

    Returns:
        A transformers pipeline ready for prompting, or ``None`` if every
        candidate failed to load (caller must handle the ``None`` case).
    """
    for model_name in MODELS_TO_TRY:
        try:
            # Lazy %-style args avoid formatting the message when the
            # level is filtered out.
            logger.info("Attempting to load model: %s", model_name)
            # Load model and tokenizer explicitly so the pipeline uses
            # exactly this pair.
            model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            # Create pipeline with the specific model and tokenizer.
            generator = pipeline(
                "text2text-generation",
                model=model,
                tokenizer=tokenizer,
                max_length=500,
                num_return_sequences=1,
            )
            logger.info("Successfully loaded model: %s", model_name)
            return generator
        except Exception:
            # Downloads can fail for many reasons (network, disk, memory,
            # incompatible weights); logger.exception records the traceback
            # and we fall through to the next candidate.
            logger.exception("Failed to load model %s", model_name)
    logger.error("Failed to load any model")
    return None
# Load model at startup.
# Module-level side effect: the first working model is loaded once and the
# resulting pipeline is shared by every request. May be None if all
# candidates failed; generate_test_cases() checks for that.
generator = load_model()
def generate_test_cases(method, url, headers, payload=""):
    """Generate API test cases for one HTTP request spec via the loaded model.

    Args:
        method: HTTP method, e.g. "GET" or "POST". Required.
        url: Target API endpoint URL. Required.
        headers: Request headers as a JSON string; empty string means none.
        payload: Request body as a JSON string; empty string means none.

    Returns:
        The model-generated test-case text on success, otherwise a
        human-readable error string (this function never raises).
    """
    # Same module logger as the top-level one; binding it locally keeps the
    # function self-contained.
    logger = logging.getLogger(__name__)
    try:
        # Detailed logging of the incoming request spec. Lazy %-style args
        # avoid string formatting when INFO is filtered out.
        logger.info("Generating test cases for:")
        logger.info("Method: %s", method)
        logger.info("URL: %s", url)
        logger.info("Headers: %s", headers)
        logger.info("Payload: %s", payload)

        # Validate required inputs before doing any work.
        if not method or not url:
            return "Error: Method and URL are required"

        # Safely parse JSON inputs; empty strings become empty dicts.
        try:
            headers_dict = json.loads(headers) if headers else {}
            payload_dict = json.loads(payload) if payload else {}
        except json.JSONDecodeError as json_error:
            return f"JSON Parsing Error: {json_error}"

        # Comprehensive prompt for test case generation. Re-serializing the
        # parsed dicts normalizes the JSON the model sees.
        prompt = f"""
Generate detailed API test cases with the following specifications:
Test Case Scenario: API Endpoint Testing
HTTP Method: {method}
API Endpoint: {url}
Request Headers: {json.dumps(headers_dict)}
Request Payload: {json.dumps(payload_dict)}
Test Case Requirements:
1. Happy Path Scenarios:
- Successful request with valid inputs
- Verify correct response status code
- Validate response structure and content
2. Negative Test Scenarios:
- Invalid authentication
- Malformed request payload
- Missing required headers
- Out-of-range parameter values
3. Edge Case Considerations:
- Maximum/minimum input limits
- Special character handling
- Unicode and internationalization testing
4. Performance and Security Checks:
- Response time validation
- Payload size limits
- Basic security vulnerability checks
Output Format:
For each test case, provide:
- Test Case ID
- Description
- Preconditions
- Input Data
- Expected Result
- Actual Result Verification Steps
"""
        # The module-level generator may be None if no model could be loaded.
        if generator is None:
            return "Error: No suitable model available for test case generation"

        # Generate test cases; the pipeline returns a list of dicts with a
        # 'generated_text' key for text2text-generation.
        try:
            response = generator(prompt)
            generated_text = response[0]['generated_text']
            logger.info("Test cases generated successfully")
            logger.debug("Generated Text: %s", generated_text)
            return generated_text
        except Exception as generation_error:
            logger.exception("Test case generation error")
            return f"Error generating test cases: {generation_error}"
    except Exception as overall_error:
        # Top-level boundary: convert anything unexpected into an error
        # string so the Gradio UI always gets text back.
        logger.exception("Unexpected error")
        return f"Unexpected error: {overall_error}"
# Gradio Interface.
# Four free-text inputs mapping positionally onto generate_test_cases'
# (method, url, headers, payload) parameters; output is the returned string
# (either generated test cases or an error message).
iface = gr.Interface(
    fn=generate_test_cases,
    inputs=[
        gr.Textbox(label="HTTP Method (GET, POST, etc.)", placeholder="e.g., GET, POST"),
        gr.Textbox(label="API URL", placeholder="e.g., https://api.example.com/endpoint"),
        gr.Textbox(label="Headers (JSON format)", placeholder='e.g., {"Content-Type": "application/json"}'),
        gr.Textbox(label="Payload (JSON format)", placeholder='e.g., {"key": "value"}'),
    ],
    outputs="text",
    title="Comprehensive API Test Case Generator",
    description="Advanced test case generation using AI-powered language models"
)
# Main execution.
# Launch the Gradio app when run as a script; a launch failure is logged
# (with traceback) rather than crashing with an unformatted stack trace.
# Fix: removed a stray trailing "|" (page-scrape residue) that made the
# final line a syntax error.
if __name__ == "__main__":
    try:
        logger.info("Starting Gradio interface")
        iface.launch()
    except Exception:
        logger.exception("Failed to launch Gradio interface")