# Hugging Face Space: API Test Case Generator
import json
import logging

import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    pipeline,
)
# Configure root logging so model loading and request handling emit
# timestamped records (DEBUG level: this is a demo Space, verbosity is fine).
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)

# Hugging Face model repository used for test-case generation.
# NOTE(review): this repo hosts a GGUF quantization of a Llama model
# (decoder-only) — confirm it loads with the transformers classes used below.
MODEL_NAME = "eltorio/Llama-3.2-3B-appreciation-F16-GGUF"
def load_model():
    """Load MODEL_NAME and wrap it in a text-generation pipeline.

    Returns:
        A transformers ``pipeline`` ready for prompting, or ``None`` if
        loading fails (callers must handle the ``None`` case themselves).
    """
    try:
        logger.info("Loading model: %s with PyTorch backend", MODEL_NAME)
        # Llama is a decoder-only (causal) architecture, so it must be loaded
        # with AutoModelForCausalLM — AutoModelForSeq2SeqLM raises for it.
        # Also note: from_pretrained() takes no "framework" kwarg; that
        # argument belongs to pipeline() only.
        model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        # "text-generation" is the task for causal LMs; its output items use
        # the same 'generated_text' key the caller reads.
        generator = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            framework="pt",  # force the PyTorch backend
            max_length=512,
            num_return_sequences=1,
        )
        logger.info("Successfully loaded model: %s", MODEL_NAME)
        return generator
    except Exception as e:
        # Deliberate best-effort: return None so the UI can still start and
        # report the failure per-request instead of crashing at import time.
        logger.error("Failed to load model %s: %s", MODEL_NAME, e)
        return None
# Build the shared generation pipeline once at import time; a failed load
# leaves this as None, which generate_test_cases checks before each request.
generator = load_model()
def generate_test_cases(method, url, headers, payload=""):
    """Generate detailed API test cases using the loaded language model.

    Args:
        method:  HTTP verb, e.g. "GET" (required).
        url:     Target API URL (required).
        headers: Request headers as a JSON object string; blank/None -> {}.
        payload: Request body as a JSON object string; blank/None -> {}.

    Returns:
        The generated test-case text, or a human-readable error string —
        this function never raises, so the Gradio UI always gets a value.
    """
    try:
        logger.info(
            "Received inputs: Method=%s, URL=%s, Headers=%s, Payload=%s",
            method, url, headers, payload,
        )
        if not method or not url:
            return "Error: HTTP Method and API URL are required inputs."
        # Parse headers and payload as JSON. Gradio may hand us None instead
        # of "" for a cleared textbox, and None.strip() would raise — coerce
        # to "" first.
        try:
            headers_dict = json.loads(headers) if (headers or "").strip() else {}
            payload_dict = json.loads(payload) if (payload or "").strip() else {}
        except json.JSONDecodeError as e:
            return f"JSON Parsing Error: {e}"
        # Prompt for the model
        prompt = f"""
Generate comprehensive API test cases for the following:
HTTP Method: {method}
API URL: {url}
Headers: {json.dumps(headers_dict, indent=2)}
Payload: {json.dumps(payload_dict, indent=2)}
Requirements:
- Include Happy Path, Negative, and Edge Cases.
- Provide validation steps and expected results.
"""
        # Ensure model is loaded (load_model returns None on failure).
        if generator is None:
            return "Error: No model is available for test case generation."
        # Generate test cases and return the first (only) sequence.
        response = generator(prompt, max_length=500, num_return_sequences=1)
        generated_text = response[0]['generated_text']
        logger.info("Successfully generated test cases.")
        return generated_text
    except Exception as e:
        # Top-level boundary: log the full traceback, return message to UI.
        logger.exception("Error during test case generation: %s", e)
        return f"Error: {e}"
# Gradio UI: four free-text inputs mapped onto generate_test_cases.
_input_boxes = [
    gr.Textbox(label="HTTP Method (GET, POST, etc.)", placeholder="e.g., GET"),
    gr.Textbox(label="API URL", placeholder="e.g., https://api.example.com/endpoint"),
    gr.Textbox(label="Headers (JSON format)", placeholder='e.g., {"Content-Type": "application/json"}'),
    gr.Textbox(label="Payload (JSON format)", placeholder='e.g., {"key": "value"}'),
]
iface = gr.Interface(
    fn=generate_test_cases,
    inputs=_input_boxes,
    outputs="text",
    title="API Test Case Generator",
    description="Generate detailed API test cases using AI models.",
)
# Entry point: launch the web UI only when run as a script, not on import.
if __name__ == "__main__":
    try:
        logger.info("Launching Gradio interface...")
        iface.launch()
    except Exception as e:
        logger.error(f"Failed to launch Gradio interface: {e}")