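"""Gradio app that generates API test cases with a Hugging Face language model."""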
import logging
import json
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
import gradio as gr
# Configure logging
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Model to use
MODEL_NAME = "eltorio/Llama-3.2-3B-appreciation-F16-GGUF"
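# NOTE (assumption): this repository hosts a GGUF checkpoint. Loading GGUF
# weights through transformers generally requires a `gguf_file` argument to
# from_pretrained pointing at the file inside the repo; the exact filename is
# not hard-coded here, so adjust load_model() if loading fails in your setup.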
def load_model():
    """
    Load the selected model and tokenizer using PyTorch.
    """
    try:
        logger.info(f"Loading model: {MODEL_NAME} with PyTorch backend")
        # Llama 3.2 is a decoder-only (causal) model, so it is loaded with
        # AutoModelForCausalLM rather than AutoModelForSeq2SeqLM.
        model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        # Create the text generation pipeline; generation length is set per
        # call in generate_test_cases() below.
        generator = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            framework="pt",  # Specify PyTorch framework
            num_return_sequences=1
        )
        logger.info(f"Successfully loaded model: {MODEL_NAME}")
        return generator
    except Exception as e:
        logger.error(f"Failed to load model {MODEL_NAME}: {e}")
        return None
# Load the generator at startup
generator = load_model()
def generate_test_cases(method, url, headers, payload=""):
    """
    Generate detailed API test cases using a language model.
    """
    try:
        # Input validation and logging
        logger.info(f"Received inputs: Method={method}, URL={url}, Headers={headers}, Payload={payload}")
        if not method or not url:
            return "Error: HTTP Method and API URL are required inputs."
        # Parse headers and payload as JSON
        try:
            headers_dict = json.loads(headers) if headers.strip() else {}
            payload_dict = json.loads(payload) if payload.strip() else {}
        except json.JSONDecodeError as e:
            return f"JSON Parsing Error: {e}"
        # Prompt for the model
        prompt = f"""
Generate comprehensive API test cases for the following:
HTTP Method: {method}
API URL: {url}
Headers: {json.dumps(headers_dict, indent=2)}
Payload: {json.dumps(payload_dict, indent=2)}
Requirements:
- Include Happy Path, Negative, and Edge Cases.
- Provide validation steps and expected results.
"""
        # Ensure the model is loaded
        if generator is None:
            return "Error: No model is available for test case generation."
        # Generate test cases; return_full_text=False keeps the prompt out of
        # the text-generation pipeline's output.
        response = generator(prompt, max_new_tokens=500, num_return_sequences=1, return_full_text=False)
        generated_text = response[0]['generated_text']
        logger.info("Successfully generated test cases.")
        return generated_text
    except Exception as e:
        logger.error(f"Error during test case generation: {e}")
        return f"Error: {e}"
# Gradio Interface
iface = gr.Interface(
    fn=generate_test_cases,
    inputs=[
        gr.Textbox(label="HTTP Method (GET, POST, etc.)", placeholder="e.g., GET"),
        gr.Textbox(label="API URL", placeholder="e.g., https://api.example.com/endpoint"),
        gr.Textbox(label="Headers (JSON format)", placeholder='e.g., {"Content-Type": "application/json"}'),
        gr.Textbox(label="Payload (JSON format)", placeholder='e.g., {"key": "value"}'),
    ],
    outputs="text",
    title="API Test Case Generator",
    description="Generate detailed API test cases using AI models."
)
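# On Hugging Face Spaces the default launch() settings are sufficient; when
# running locally, iface.launch(share=True) can optionally expose a temporary
# public URL.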
# Main execution
if __name__ == "__main__":
    try:
        logger.info("Launching Gradio interface...")
        iface.launch()
    except Exception as e:
        logger.error(f"Failed to launch Gradio interface: {e}")