# apilux / app.py
# AI-powered API test-case generator: tries several instruction-tuned
# seq2seq models in order of preference and serves a Gradio interface.
import logging
import json
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
import gradio as gr
# Configure verbose logging for the whole app; modules log through `logger`.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)
# Candidate models, tried in order until one loads successfully.
# Ordered by preference; note the later entries are fallbacks, not upgrades.
MODELS_TO_TRY = [
"google/flan-t5-large", # Preferred: instruction-tuned, more capable than base
"google/flan-t5-xl", # Larger variant — NOTE(review): if -large failed to load,
                     # -xl (bigger) is unlikely to succeed either; confirm order
"facebook/bart-large-cnn", # Alternative architecture (summarization-tuned)
"t5-large" # Last-resort plain T5 checkpoint
]
def load_model():
    """Try each candidate in MODELS_TO_TRY and return the first working pipeline.

    Returns:
        A Hugging Face ``text2text-generation`` pipeline for the first model
        that loads, or ``None`` when every candidate fails.
    """
    for candidate in MODELS_TO_TRY:
        try:
            logger.info(f"Attempting to load model: {candidate}")
            # Load the weights and tokenizer explicitly so failures surface here.
            seq2seq_model = AutoModelForSeq2SeqLM.from_pretrained(candidate)
            seq2seq_tokenizer = AutoTokenizer.from_pretrained(candidate)
            text_generator = pipeline(
                "text2text-generation",
                model=seq2seq_model,
                tokenizer=seq2seq_tokenizer,
                max_length=500,
                num_return_sequences=1,
            )
        except Exception as model_load_error:
            # Log and move on to the next candidate.
            logger.error(f"Failed to load model {candidate}: {model_load_error}")
            continue
        logger.info(f"Successfully loaded model: {candidate}")
        return text_generator
    logger.error("Failed to load any model")
    return None
# Load the generation pipeline once at import time; stays None if every
# candidate model failed (generate_test_cases handles that case).
generator = load_model()
def generate_test_cases(method, url, headers, payload=""):
    """Generate API test cases for the given request using the loaded model.

    Args:
        method: HTTP method name (e.g. "GET", "POST"). Required.
        url: Target API endpoint URL. Required.
        headers: Request headers as a JSON-object string; empty means none.
        payload: Request body as a JSON-object string; empty means none.

    Returns:
        The model-generated test cases as text, or a human-readable error
        string on invalid input or generation failure. Never raises.
    """
    try:
        # Lazy %-style args: formatting is skipped when the level is disabled.
        logger.info("Generating test cases for:")
        logger.info("Method: %s", method)
        logger.info("URL: %s", url)
        logger.info("Headers: %s", headers)
        logger.info("Payload: %s", payload)

        # Validate required inputs before doing any work.
        if not method or not url:
            return "Error: Method and URL are required"

        # Safely parse JSON inputs; surface parse problems to the user.
        try:
            headers_dict = json.loads(headers) if headers else {}
            payload_dict = json.loads(payload) if payload else {}
        except json.JSONDecodeError as json_error:
            return f"JSON Parsing Error: {json_error}"

        # Valid JSON that is not an object (e.g. a bare list or number) would
        # silently produce a nonsensical prompt — reject it explicitly.
        if not isinstance(headers_dict, dict) or not isinstance(payload_dict, dict):
            return "Error: Headers and Payload must be JSON objects"

        # Comprehensive prompt for test case generation
        prompt = f"""
Generate detailed API test cases with the following specifications:
Test Case Scenario: API Endpoint Testing
HTTP Method: {method}
API Endpoint: {url}
Request Headers: {json.dumps(headers_dict)}
Request Payload: {json.dumps(payload_dict)}
Test Case Requirements:
1. Happy Path Scenarios:
- Successful request with valid inputs
- Verify correct response status code
- Validate response structure and content
2. Negative Test Scenarios:
- Invalid authentication
- Malformed request payload
- Missing required headers
- Out-of-range parameter values
3. Edge Case Considerations:
- Maximum/minimum input limits
- Special character handling
- Unicode and internationalization testing
4. Performance and Security Checks:
- Response time validation
- Payload size limits
- Basic security vulnerability checks
Output Format:
For each test case, provide:
- Test Case ID
- Description
- Preconditions
- Input Data
- Expected Result
- Actual Result Verification Steps
"""

        # The module-level generator is None when no model could be loaded.
        if generator is None:
            return "Error: No suitable model available for test case generation"

        try:
            response = generator(prompt)
            generated_text = response[0]['generated_text']
            logger.info("Test cases generated successfully")
            logger.debug("Generated Text: %s", generated_text)
            return generated_text
        except Exception as generation_error:
            # logger.exception records the full traceback, not just the message.
            logger.exception("Test case generation error")
            return f"Error generating test cases: {generation_error}"
    except Exception as overall_error:
        # Last-resort guard: the UI expects a string, never an exception.
        logger.exception("Unexpected error")
        return f"Unexpected error: {overall_error}"
# Gradio Interface: four text inputs describing the request, one text output.
_input_components = [
    gr.Textbox(label="HTTP Method (GET, POST, etc.)", placeholder="e.g., GET, POST"),
    gr.Textbox(label="API URL", placeholder="e.g., https://api.example.com/endpoint"),
    gr.Textbox(label="Headers (JSON format)", placeholder='e.g., {"Content-Type": "application/json"}'),
    gr.Textbox(label="Payload (JSON format)", placeholder='e.g., {"key": "value"}'),
]
iface = gr.Interface(
    fn=generate_test_cases,
    inputs=_input_components,
    outputs="text",
    title="Comprehensive API Test Case Generator",
    description="Advanced test case generation using AI-powered language models",
)
# Main execution: launch the Gradio app when run as a script.
if __name__ == "__main__":
    try:
        logger.info("Starting Gradio interface")
        iface.launch()
    except Exception as launch_error:
        # logger.exception keeps the traceback; plain error() discarded it.
        logger.exception("Failed to launch Gradio interface: %s", launch_error)