''' GRADIO APP '''

import os

import gradio as gr
import openai
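# Note: openai.ChatCompletion (used below) is the legacy pre-1.0 openai SDK
# interface; openai>=1.0 replaced it with client.chat.completions.create().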
'''
#########################
  Environment Variables
#########################
'''

# Read the OpenAI API key from an environment variable
openai.api_key = os.getenv("OPENAI_API_KEY")
if not openai.api_key:
    raise RuntimeError("No OpenAI API key found in the environment variable OPENAI_API_KEY")
'''
#####################
  Backend Functions
#####################
'''
conversation_tc = [{"role": "system", "content": "You are a technical and professional QA manager working in a technology firm. You provide test cases for a given scenario."}]
convo_py = [{"role": "system", "content": "You are a technical and professional QA manager who specializes in Python unittest, working in a technology firm. You should provide a Python unittest test script for the test case provided."}]
convo_java = [{"role": "system", "content": "You are a technical and professional QA manager who specializes in Java JUnit, working in a technology firm. You should provide a Java JUnit test script for the test case provided."}]
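# These conversation lists live at module level, so chat history accumulates
# across requests and is shared by every user for the lifetime of the process.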
def generate_test_cases(topic):
    if topic:
        conversation_tc.append({"role": "user", "content": f"Generate a manual test case for the topic: {topic}"})
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=conversation_tc
        )
        test_cases = response["choices"][0]["message"]["content"]
        return test_cases
    else:
        return "Please enter a topic/subject."
def generate_test_scripts(framework, test_cases):
    if framework == "Python, unittest":
        return generate_python_unittest(test_cases)
    elif framework == "Java, JUnit":
        return generate_java_junit(test_cases)
    else:
        return "Unsupported language or framework."
def generate_python_unittest(test_cases):
    convo_py.append({"role": "user", "content": f"Here is a manual test case. {test_cases}"})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=convo_py
    )
    script = response["choices"][0]["message"]["content"]
    return script
def generate_java_junit(test_cases):
    convo_java.append({"role": "user", "content": f"Here is a manual test case. {test_cases}"})
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=convo_java
    )
    script = response["choices"][0]["message"]["content"]
    return script
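# Minimal standalone usage sketch (assumes OPENAI_API_KEY is set and the
# legacy pre-1.0 openai SDK is installed; the example topic is illustrative):
#
#   case = generate_test_cases("User login with two-factor authentication")
#   script = generate_test_scripts("Python, unittest", case)
#   print(script)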
'''
#####################
  Markdown Content
#####################
'''

title = """
# QA Leveraging GenAI
"""
description = '''
This tool leverages OpenAI's GPT-3.5-turbo model to automate two key aspects of quality assurance in software development: generating test cases and writing test scripts.
By entering a functional use case, you can generate detailed test cases to help ensure the quality of your software.
You can also select your preferred testing framework, and the tool will generate test scripts based on the generated test cases.
<hr>
'''
howto = '''
<br>

## How to Use

**Test Case Generation**
1. Enter a functional use case: describe the use case for which you want to generate test cases. The more specific you are, the better the generated test cases will be.
2. Click the "Generate Test Case" button.

**Test Script Generation**
1. Select a test framework: choose the testing framework the test scripts should target. Currently, you can select between Python's unittest and Java's JUnit.
2. Click the "Generate Test Script" button.

---

## Results

- **Test Case**: This section displays the generated test cases based on the functional use case you provided.
- **Test Script**: After choosing your preferred testing framework, this section displays the test scripts corresponding to the generated test cases.

---
'''
notices = '''
**Notice**

The outputs of this tool are produced by an LLM and therefore might not be perfect. It's best to review and edit the generated test cases and scripts as necessary.
'''
'''
#####################
  Gradio Block
#####################
'''
with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Row():
        with gr.Column():
            test_case_topic = gr.Textbox(label='Functional Use Case', value='VR Headset Battery Installation', info="Descriptive use case to generate test cases for")
            tc_button = gr.Button("Generate Test Case")
            test_case = gr.Textbox(label="Test Case")
        with gr.Column():
            fw = gr.Dropdown(["Python, unittest", "Java, JUnit"], label="Framework", info="Framework to generate test scripts with")
            ts_button = gr.Button("Generate Test Script")
            test_script = gr.Textbox(label="Test Script")
    tc_button.click(fn=generate_test_cases, inputs=test_case_topic, outputs=test_case, show_progress=True)
    ts_button.click(fn=generate_test_scripts, inputs=[fw, test_case], outputs=test_script, show_progress=True)
    # gr.Markdown(howto)
    gr.Markdown(notices)

demo.queue(api_open=False, max_size=5).launch()
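# To run locally (a sketch; "app.py" is a placeholder filename, and the pinned
# openai version matches the legacy ChatCompletion API used above):
#   pip install "openai<1" gradio
#   export OPENAI_API_KEY=...   # your OpenAI API key
#   python app.py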