prnvtripathi14 committed
Commit a6a5465 · verified · 1 Parent(s): 13067f9
Files changed (1):
  1. app.py +48 -90

app.py CHANGED
@@ -1,110 +1,68 @@
  import logging
- import json
- from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
  import gradio as gr

- # Configure logging
- logging.basicConfig(level=logging.DEBUG,
-                     format='%(asctime)s - %(levelname)s - %(message)s')
- logger = logging.getLogger(__name__)

- # Model to use
- MODEL_NAME = "google/flan-t5-base"

- def load_model():
-     """
-     Load the selected model and tokenizer using PyTorch.
-     """
      try:
-         logger.info(f"Loading model: {MODEL_NAME} with PyTorch backend")
-
-         # Load the model and tokenizer
-         model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, framework="pt")
-         tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-
-         # Create the text generation pipeline
-         generator = pipeline(
-             "text2text-generation",
-             model=model,
-             tokenizer=tokenizer,
-             framework="pt",  # Specify PyTorch framework
-             max_length=512,
-             num_return_sequences=1
          )
-         logger.info(f"Successfully loaded model: {MODEL_NAME}")
-         return generator
      except Exception as e:
-         logger.error(f"Failed to load model {MODEL_NAME}: {e}")
-         return None
-
- # Load the generator at startup
- generator = load_model()

- def generate_test_cases(method, url, headers, payload=""):
-     """
-     Generate detailed API test cases using a language model.
-     """
      try:
-         # Input validation and logging
-         logger.info(f"Received inputs: Method={method}, URL={url}, Headers={headers}, Payload={payload}")
-
-         if not method or not url:
-             return "Error: HTTP Method and API URL are required inputs."
-
-         # Parse headers and payload as JSON
-         try:
-             headers_dict = json.loads(headers) if headers.strip() else {}
-             payload_dict = json.loads(payload) if payload.strip() else {}
-         except json.JSONDecodeError as e:
-             return f"JSON Parsing Error: {e}"
-
-         # Prompt for the model
-         prompt = f"""
-         Generate comprehensive API test cases for the following:
-
-         HTTP Method: {method}
-         API URL: {url}
-         Headers: {json.dumps(headers_dict, indent=2)}
-         Payload: {json.dumps(payload_dict, indent=2)}
-
-         Requirements:
-         - Include Happy Path, Negative, and Edge Cases.
-         - Provide validation steps and expected results.
-         """
-
-         # Ensure model is loaded
-         if generator is None:
-             return "Error: No model is available for test case generation."
-
-         # Generate test cases
-         response = generator(prompt, max_length=500, num_return_sequences=1)
-         generated_text = response[0]['generated_text']
-
-         logger.info("Successfully generated test cases.")
-         return generated_text
-
      except Exception as e:
-         logger.error(f"Error during test case generation: {e}")
-         return f"Error: {e}"

- # Gradio Interface
- iface = gr.Interface(
-     fn=generate_test_cases,
      inputs=[
-         gr.Textbox(label="HTTP Method (GET, POST, etc.)", placeholder="e.g., GET"),
-         gr.Textbox(label="API URL", placeholder="e.g., https://api.example.com/endpoint"),
-         gr.Textbox(label="Headers (JSON format)", placeholder='e.g., {"Content-Type": "application/json"}'),
-         gr.Textbox(label="Payload (JSON format)", placeholder='e.g., {"key": "value"}'),
      ],
      outputs="text",
-     title="API Test Case Generator",
-     description="Generate detailed API test cases using AI models."
  )

- # Main execution
  if __name__ == "__main__":
      try:
-         logger.info("Launching Gradio interface...")
-         iface.launch()
      except Exception as e:
-         logger.error(f"Failed to launch Gradio interface: {e}")
  import logging
+ from transformers import pipeline
  import gradio as gr

+ # Set up logging
+ logging.basicConfig(
+     filename="app.log",
+     level=logging.INFO,
+     format="%(asctime)s - %(levelname)s - %(message)s"
+ )

+ # Load the generative AI model
+ logging.info("Loading the Hugging Face model...")
+ try:
+     model = pipeline("text2text-generation", model="google/flan-t5-large")  # Replace with your chosen model
+     logging.info("Model loaded successfully.")
+ except Exception as e:
+     logging.error(f"Error loading the model: {e}")
+     raise

+ # Function to generate test cases
+ def generate_test_cases(api_info):
+     logging.info(f"Generating test cases for API info: {api_info}")
      try:
+         prompt = (
+             f"Generate API test cases for the following API:\n\n{api_info}\n\n"
+             f"Test cases should include:\n- Happy path\n- Negative tests\n- Edge cases"
          )
+         result = model(prompt, max_length=512, num_return_sequences=1)
+         logging.info(f"Test cases generated successfully.")
+         return result[0]['generated_text']
      except Exception as e:
+         logging.error(f"Error generating test cases: {e}")
+         return "An error occurred while generating test cases."

+ # Process input and generate output
+ def process_input(url, method, headers, payload):
      try:
+         logging.info("Received user input.")
+         api_info = f"URL: {url}\nMethod: {method}\nHeaders: {headers}\nPayload: {payload}"
+         logging.debug(f"Formatted API info: {api_info}")
+         test_cases = generate_test_cases(api_info)
+         return test_cases
      except Exception as e:
+         logging.error(f"Error processing input: {e}")
+         return "An error occurred. Please check the input format and try again."

+ # Define Gradio interface
+ interface = gr.Interface(
+     fn=process_input,
      inputs=[
+         gr.Textbox(label="API URL"),
+         gr.Textbox(label="HTTP Method"),
+         gr.Textbox(label="Headers (JSON format)"),
+         gr.Textbox(label="Payload (JSON format)"),
      ],
      outputs="text",
+     title="API Test Case Generator"
  )

+ # Launch Gradio app
  if __name__ == "__main__":
      try:
+         logging.info("Starting the Gradio app...")
+         interface.launch()
+         logging.info("Gradio app launched successfully.")
      except Exception as e:
+         logging.error(f"Error launching the Gradio app: {e}")