app.py CHANGED
@@ -1,5 +1,5 @@
 import logging
-from transformers import pipeline
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import gradio as gr
 
 # Set up logging
@@ -9,16 +9,20 @@ logging.basicConfig(
     format="%(asctime)s - %(levelname)s - %(message)s"
 )
 
-#
-
+# Select the model you want to use (LLaMA, GPT, or CodeT5)
+MODEL_NAME = "meta-llama/LLaMA-2-7b-hf"
+
+# Load the model and tokenizer
+logging.info(f"Loading the model: {MODEL_NAME}...")
 try:
-
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
     logging.info("Model loaded successfully.")
 except Exception as e:
     logging.error(f"Error loading the model: {e}")
     raise
 
-#
+# Define a function to generate test cases
 def generate_test_cases(api_info):
     logging.info(f"Generating test cases for API info: {api_info}")
     try:
@@ -26,9 +30,14 @@ def generate_test_cases(api_info):
             f"Generate API test cases for the following API:\n\n{api_info}\n\n"
             f"Test cases should include:\n- Happy path\n- Negative tests\n- Edge cases"
         )
-
-
-
+        # Tokenize the input prompt
+        inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
+        # Generate output from the model
+        outputs = model.generate(inputs["input_ids"], max_length=512, num_return_sequences=1, do_sample=True)
+        # Decode the generated text
+        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+        logging.info("Test cases generated successfully.")
+        return generated_text
     except Exception as e:
         logging.error(f"Error generating test cases: {e}")
         return "An error occurred while generating test cases."
@@ -55,7 +64,7 @@ interface = gr.Interface(
         gr.Textbox(label="Payload (JSON format)"),
     ],
     outputs="text",
-    title="API Test Case Generator"
+    title=f"API Test Case Generator ({MODEL_NAME})"
 )
 
 # Launch Gradio app
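A note on the new model-loading lines: "meta-llama/LLaMA-2-7b-hf" is not the canonical Hub id (the repo is "meta-llama/Llama-2-7b-hf"), access to it is gated behind Meta's license, and a 7B model at the default float32 precision needs roughly 28 GB of memory. A minimal sketch of a more defensive load, assuming an access token is exposed through a hypothetical HF_TOKEN environment variable:

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "meta-llama/Llama-2-7b-hf"  # canonical casing; gated repo
token = os.environ.get("HF_TOKEN")       # assumed env var holding a Hugging Face access token

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=token)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    token=token,
    torch_dtype=torch.float16,  # halves memory versus the float32 default
    device_map="auto",          # needs the accelerate package; uses a GPU when present
)

The torch_dtype and device_map arguments are optional; without them the sketch behaves like the lines in the diff, just with authentication.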
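Two behaviors of the new generation block are worth flagging: max_length=512 counts the prompt tokens as well, so a long API description leaves little or no budget for the answer, and tokenizer.decode(outputs[0], ...) returns the prompt concatenated with the continuation, so callers see their own input echoed back. A sketch of a variant that sidesteps both, reusing the tokenizer and model names from the diff:

inputs = tokenizer(prompt, return_tensors="pt", truncation=True)

outputs = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],  # avoids the missing-attention-mask warning
    max_new_tokens=512,                       # budget for generated tokens only
    do_sample=True,
    num_return_sequences=1,
)

# Slice off the prompt so only the model's continuation is decoded
prompt_len = inputs["input_ids"].shape[1]
generated_text = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)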
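The last hunk stops at the "# Launch Gradio app" comment, so the launch call itself is outside the diff. For orientation, a sketch of how the tail of app.py presumably reads; the first two textbox labels are guesses, since only the payload field appears in the shown context:

interface = gr.Interface(
    fn=generate_test_cases,
    inputs=[
        gr.Textbox(label="API Endpoint"),   # assumed label, not shown in the diff
        gr.Textbox(label="HTTP Method"),    # assumed label, not shown in the diff
        gr.Textbox(label="Payload (JSON format)"),
    ],
    outputs="text",
    title=f"API Test Case Generator ({MODEL_NAME})",
)

# Launch Gradio app
interface.launch()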