import logging
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr

# Set up logging
logging.basicConfig(
    filename="app.log",
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)

# Model to use (an instruct-tuned code model from the Hugging Face Hub)
MODEL_NAME = "Qwen/Qwen2.5-Coder-3B-Instruct"

# Load the model and tokenizer
logging.info(f"Loading the model: {MODEL_NAME}...")
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    logging.info("Model loaded successfully.")
except Exception as e:
    logging.error(f"Error loading the model: {e}")
    raise
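# Note (assumption, not part of the original app): this loads the model on
# CPU at default precision. On a GPU host, passing device_map="auto" and
# torch_dtype="auto" to from_pretrained (device_map requires the
# `accelerate` package) is a common way to speed loading and inference up.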

# Define a function to generate test cases
def generate_test_cases(api_info):
    logging.info(f"Generating test cases for API info: {api_info}")
    try:
        prompt = (
            f"Generate API test cases for the following API:\n\n{api_info}\n\n"
            "Test cases should include:\n- Happy path\n- Negative tests\n- Edge cases"
        )
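        # Note (assumption): since this is an instruct-tuned model, wrapping
        # the prompt in the tokenizer's chat template usually yields cleaner
        # answers. A minimal sketch using the same tokenizer:
        #
        #   messages = [{"role": "user", "content": prompt}]
        #   prompt = tokenizer.apply_chat_template(
        #       messages, tokenize=False, add_generation_prompt=True
        #   )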
        # Tokenize the input prompt
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
        # Generate output from the model
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],  # Provide the attention mask explicitly
            max_new_tokens=512,  # Cap generated tokens; max_length would count the prompt too
            num_return_sequences=1,
            do_sample=True,
        )
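        # Note: with do_sample=True and no explicit temperature/top_p, the
        # defaults from the model's generation_config apply. Pinning them
        # (e.g. temperature=0.7, top_p=0.8; illustrative values, not from
        # the original app) makes the output style more predictable.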
        # Decode only the newly generated tokens, skipping the echoed prompt
        generated_text = tokenizer.decode(
            outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
        )
        logging.info("Test cases generated successfully.")
        return generated_text
    except Exception as e:
        logging.error(f"Error generating test cases: {e}")
        return "An error occurred while generating test cases."

# Process input and generate output
def process_input(url, method, headers, payload):
    try:
        logging.info("Received user input.")
        api_info = f"URL: {url}\nMethod: {method}\nHeaders: {headers}\nPayload: {payload}"
        logging.debug(f"Formatted API info: {api_info}")
        return generate_test_cases(api_info)
    except Exception as e:
        logging.error(f"Error processing input: {e}")
        return "An error occurred. Please check the input format and try again."
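# Note (assumption, not in the original app): headers and payload arrive as
# raw strings and are never validated. A stricter variant of process_input
# could check them up front, e.g.:
#
#   import json
#   try:
#       json.loads(headers or "{}")
#       json.loads(payload or "{}")
#   except json.JSONDecodeError:
#       return "Headers and payload must be valid JSON."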

# Define the Gradio interface
interface = gr.Interface(
    fn=process_input,
    inputs=[
        gr.Textbox(label="API URL"),
        gr.Textbox(label="HTTP Method"),
        gr.Textbox(label="Headers (JSON format)"),
        gr.Textbox(label="Payload (JSON format)"),
    ],
    outputs="text",
    title=f"API Test Case Generator ({MODEL_NAME})",
)
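# Note (assumption): gr.Interface also accepts an `examples` argument that
# pre-fills the form, e.g.
#   examples=[["https://api.example.com/users", "GET", "{}", "{}"]]
# The URL above is a hypothetical placeholder.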

# Launch the Gradio app
if __name__ == "__main__":
    try:
        logging.info("Starting the Gradio app...")
        # launch() blocks until the server shuts down, so nothing after this
        # call runs while the app is serving requests.
        interface.launch()
    except Exception as e:
        logging.error(f"Error launching the Gradio app: {e}")