apilux / app.py
prnvtripathi14's picture
update app.py
026025c verified
raw
history blame
4.09 kB
import logging
import json
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
import gradio as gr
# Configure module-wide logging: DEBUG level so every model-loading attempt
# and generation failure is visible in the Space logs.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Candidate models, tried in order by load_model() until one loads.
# NOTE(review): listed largest-first; flan-t5-xxl likely exceeds free-tier
# memory, in which case the loader falls through to the smaller entries —
# confirm the intended priority order.
MODELS_TO_TRY = [
    "google/flan-t5-xxl",     # Powerful instruction-following model
    "bigscience/T0pp",        # Optimized for zero-shot tasks
    "t5-large",               # General-purpose text generation
    "google/flan-t5-large"    # Lightweight instruction-tuned model
]
def load_model():
    """Load the first usable text2text-generation pipeline.

    Tries each entry in MODELS_TO_TRY in order; the first model whose
    weights and tokenizer both load is wrapped in a transformers
    ``pipeline`` and returned.

    Returns:
        transformers.Pipeline | None: a ready "text2text-generation"
        pipeline, or ``None`` when every candidate failed to load.
    """
    for model_name in MODELS_TO_TRY:
        try:
            # Lazy %-style args: the message is only formatted if the
            # record is actually emitted.
            logger.info("Attempting to load model: %s", model_name)
            model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            generator = pipeline(
                "text2text-generation",
                model=model,
                tokenizer=tokenizer,
                max_length=512,
                num_return_sequences=1,
            )
            logger.info("Successfully loaded model: %s", model_name)
            return generator
        except Exception:
            # logger.exception records the full traceback, which the
            # previous logger.error(f"...{e}") discarded — needed to tell
            # OOM apart from download/auth failures when falling back.
            logger.exception("Failed to load model %s", model_name)
    logger.error("All model attempts failed. No model loaded.")
    return None
# Load the generator once at import time so every Gradio request reuses the
# same pipeline; None here means no candidate model loaded and generation
# is disabled (generate_test_cases reports this to the user).
generator = load_model()
def generate_test_cases(method, url, headers, payload=""):
    """Generate detailed API test cases using the loaded language model.

    Args:
        method: HTTP method of the endpoint (e.g. "GET"); required.
        url: Full API URL; required.
        headers: Request headers as a JSON string; "" or None means no headers.
        payload: Request body as a JSON string; "" or None means no payload.

    Returns:
        str: the model-generated test cases, or a human-readable
        "Error: ..." / "JSON Parsing Error: ..." message on failure.
    """
    try:
        logger.info(
            "Received inputs: Method=%s, URL=%s, Headers=%s, Payload=%s",
            method, url, headers, payload,
        )
        if not method or not url:
            return "Error: HTTP Method and API URL are required inputs."

        # Gradio can hand us None for a cleared textbox; the previous
        # headers.strip() raised AttributeError in that case, which the
        # outer except turned into an opaque "Error: 'NoneType'..." message.
        headers = headers or ""
        payload = payload or ""
        try:
            headers_dict = json.loads(headers) if headers.strip() else {}
            payload_dict = json.loads(payload) if payload.strip() else {}
        except json.JSONDecodeError as e:
            return f"JSON Parsing Error: {e}"

        # Prompt for the model.
        prompt = f"""
Generate comprehensive API test cases for the following:
HTTP Method: {method}
API URL: {url}
Headers: {json.dumps(headers_dict, indent=2)}
Payload: {json.dumps(payload_dict, indent=2)}
Requirements:
- Include Happy Path, Negative, and Edge Cases.
- Provide validation steps and expected results.
"""
        # Model may have failed to load at startup (generator is None).
        if generator is None:
            return "Error: No model is available for test case generation."

        response = generator(prompt, max_length=500, num_return_sequences=1)
        generated_text = response[0]['generated_text']
        logger.info("Successfully generated test cases.")
        return generated_text
    except Exception as e:
        # Keep the traceback in the logs; the user only sees the message.
        logger.exception("Error during test case generation")
        return f"Error: {e}"
# Gradio UI: four free-text inputs wired straight onto generate_test_cases.
_input_fields = [
    gr.Textbox(label="HTTP Method (GET, POST, etc.)", placeholder="e.g., GET"),
    gr.Textbox(label="API URL", placeholder="e.g., https://api.example.com/endpoint"),
    gr.Textbox(label="Headers (JSON format)", placeholder='e.g., {"Content-Type": "application/json"}'),
    gr.Textbox(label="Payload (JSON format)", placeholder='e.g., {"key": "value"}'),
]

iface = gr.Interface(
    fn=generate_test_cases,
    inputs=_input_fields,
    outputs="text",
    title="API Test Case Generator",
    description="Generate detailed API test cases using AI models.",
)
# Script entry point: launch the UI, logging (rather than crashing on) any
# startup failure so the Space logs capture the reason.
if __name__ == "__main__":
    try:
        logger.info("Launching Gradio interface...")
        iface.launch()
    except Exception as exc:
        logger.error(f"Failed to launch Gradio interface: {exc}")