prnvtripathi14 committed
Commit 026025c · verified · 1 Parent(s): 315b0b5

update app.py

Files changed (1)
  1. app.py +53 -89
app.py CHANGED
@@ -4,19 +4,22 @@ from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer
 import gradio as gr
 
 # Configure logging
-logging.basicConfig(level=logging.DEBUG,
+logging.basicConfig(level=logging.DEBUG,
                     format='%(asctime)s - %(levelname)s - %(message)s')
 logger = logging.getLogger(__name__)
 
-# Try multiple models in succession
+# Models to try
 MODELS_TO_TRY = [
-    "google/flan-t5-large",     # More capable than base
-    "google/flan-t5-xl",        # Even more capable
-    "facebook/bart-large-cnn",  # Alternative model
-    "t5-large"                  # Fallback T5 model
+    "google/flan-t5-xxl",       # Powerful instruction-following model
+    "bigscience/T0pp",          # Optimized for zero-shot tasks
+    "t5-large",                 # General-purpose text generation
+    "google/flan-t5-large"      # Lightweight instruction-tuned model
 ]
 
 def load_model():
+    """
+    Attempt to load a suitable model for text generation.
+    """
     for model_name in MODELS_TO_TRY:
         try:
             logger.info(f"Attempting to load model: {model_name}")
@@ -25,129 +28,90 @@ def load_model():
             model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
             tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-            # Create pipeline with specific model and tokenizer
+            # Create the text generation pipeline
             generator = pipeline(
-                "text2text-generation",
-                model=model,
+                "text2text-generation",
+                model=model,
                 tokenizer=tokenizer,
-                max_length=500,
+                max_length=512,
                 num_return_sequences=1
             )
-
             logger.info(f"Successfully loaded model: {model_name}")
             return generator
-        except Exception as model_load_error:
-            logger.error(f"Failed to load model {model_name}: {model_load_error}")
+        except Exception as e:
+            logger.error(f"Failed to load model {model_name}: {e}")
 
-    logger.error("Failed to load any model")
+    logger.error("All model attempts failed. No model loaded.")
     return None
 
-# Load model at startup
+# Load the generator at startup
 generator = load_model()
 
 def generate_test_cases(method, url, headers, payload=""):
+    """
+    Generate detailed API test cases using a language model.
+    """
     try:
-        # Detailed logging
-        logger.info(f"Generating test cases for:")
-        logger.info(f"Method: {method}")
-        logger.info(f"URL: {url}")
-        logger.info(f"Headers: {headers}")
-        logger.info(f"Payload: {payload}")
-
-        # Validate inputs
+        # Input validation and logging
+        logger.info(f"Received inputs: Method={method}, URL={url}, Headers={headers}, Payload={payload}")
+
         if not method or not url:
-            return "Error: Method and URL are required"
-
-        # Safely parse JSON inputs
+            return "Error: HTTP Method and API URL are required inputs."
+
+        # Parse headers and payload as JSON
         try:
-            headers_dict = json.loads(headers) if headers else {}
-            payload_dict = json.loads(payload) if payload else {}
-        except json.JSONDecodeError as json_error:
-            return f"JSON Parsing Error: {json_error}"
+            headers_dict = json.loads(headers) if headers.strip() else {}
+            payload_dict = json.loads(payload) if payload.strip() else {}
+        except json.JSONDecodeError as e:
+            return f"JSON Parsing Error: {e}"
 
-        # Comprehensive prompt for test case generation
+        # Prompt for the model
         prompt = f"""
-        Generate detailed API test cases with the following specifications:
-
-        Test Case Scenario: API Endpoint Testing
+        Generate comprehensive API test cases for the following:
 
         HTTP Method: {method}
-        API Endpoint: {url}
-        Request Headers: {json.dumps(headers_dict)}
-        Request Payload: {json.dumps(payload_dict)}
-
-        Test Case Requirements:
-        1. Happy Path Scenarios:
-           - Successful request with valid inputs
-           - Verify correct response status code
-           - Validate response structure and content
+        API URL: {url}
+        Headers: {json.dumps(headers_dict, indent=2)}
+        Payload: {json.dumps(payload_dict, indent=2)}
 
-        2. Negative Test Scenarios:
-           - Invalid authentication
-           - Malformed request payload
-           - Missing required headers
-           - Out-of-range parameter values
-
-        3. Edge Case Considerations:
-           - Maximum/minimum input limits
-           - Special character handling
-           - Unicode and internationalization testing
-
-        4. Performance and Security Checks:
-           - Response time validation
-           - Payload size limits
-           - Basic security vulnerability checks
-
-        Output Format:
-        For each test case, provide:
-           - Test Case ID
-           - Description
-           - Preconditions
-           - Input Data
-           - Expected Result
-           - Actual Result Verification Steps
+        Requirements:
+        - Include Happy Path, Negative, and Edge Cases.
+        - Provide validation steps and expected results.
         """
 
-        # Check if generator is available
+        # Ensure model is loaded
        if generator is None:
-            return "Error: No suitable model available for test case generation"
+            return "Error: No model is available for test case generation."
 
         # Generate test cases
-        try:
-            response = generator(prompt)
-            generated_text = response[0]['generated_text']
-
-            logger.info("Test cases generated successfully")
-            logger.debug(f"Generated Text: {generated_text}")
-
-            return generated_text
+        response = generator(prompt, max_length=500, num_return_sequences=1)
+        generated_text = response[0]['generated_text']
 
-        except Exception as generation_error:
-            logger.error(f"Test case generation error: {generation_error}")
-            return f"Error generating test cases: {generation_error}"
+        logger.info("Successfully generated test cases.")
+        return generated_text
 
-    except Exception as overall_error:
-        logger.error(f"Unexpected error: {overall_error}")
-        return f"Unexpected error: {overall_error}"
+    except Exception as e:
+        logger.error(f"Error during test case generation: {e}")
+        return f"Error: {e}"
 
 # Gradio Interface
 iface = gr.Interface(
     fn=generate_test_cases,
     inputs=[
-        gr.Textbox(label="HTTP Method (GET, POST, etc.)", placeholder="e.g., GET, POST"),
+        gr.Textbox(label="HTTP Method (GET, POST, etc.)", placeholder="e.g., GET"),
         gr.Textbox(label="API URL", placeholder="e.g., https://api.example.com/endpoint"),
         gr.Textbox(label="Headers (JSON format)", placeholder='e.g., {"Content-Type": "application/json"}'),
         gr.Textbox(label="Payload (JSON format)", placeholder='e.g., {"key": "value"}'),
     ],
     outputs="text",
-    title="Comprehensive API Test Case Generator",
-    description="Advanced test case generation using AI-powered language models"
+    title="API Test Case Generator",
+    description="Generate detailed API test cases using AI models."
 )
 
 # Main execution
 if __name__ == "__main__":
     try:
-        logger.info("Starting Gradio interface")
+        logger.info("Launching Gradio interface...")
         iface.launch()
-    except Exception as launch_error:
-        logger.error(f"Failed to launch Gradio interface: {launch_error}")
+    except Exception as e:
+        logger.error(f"Failed to launch Gradio interface: {e}")