''' GRADIO APP '''

import os, json
import gradio as gr
import openai



'''
#########################
    Environment Variables
#########################
'''
# Read the OpenAI API key from the OPENAI_API_KEY environment variable
openai.api_key = os.getenv("OPENAI_API_KEY")

if not openai.api_key:
    raise ValueError("No OpenAI API key found in the environment variable OPENAI_API_KEY")

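# A minimal sketch of supplying the key (assumed setup, not part of the app itself):
# export the variable in the shell that launches this script before running it, e.g.
#   export OPENAI_API_KEY="sk-..."     # Linux/macOS (placeholder value, never commit a real key)
#   set OPENAI_API_KEY=sk-...          # Windows cmd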

'''
#####################
    Backend Functions
#####################
'''

# Default system prompts for each conversation; the UI can override these via the "Context" textboxes.
conversation_tc = [{"role": "system", "content": "You are a technical and professional QA manager, working in a technological firm. You provide test cases for a scenario."}]
convo_py = [{"role": "system", "content": "You are a technical and professional QA manager who specializes in Python unittest, working in a technological firm. You should provide Python unittest test scripts for the test case provided."}]
convo_java = [{"role": "system", "content": "You are a technical and professional QA manager who specializes in Java JUnit, working in a technological firm. You should provide Java JUnit test scripts for the test case provided."}]


def generate_test_cases(ctx, topic):
    # The "Context" textbox supplies the conversation so far as a JSON-encoded message list;
    # if it is provided, it replaces the default system prompt defined above.
    global conversation_tc
    if ctx:
        conversation_tc = json.loads(ctx)
        print(conversation_tc)
    if topic:
        conversation_tc.append({"role": "user", "content": f"Generate a manual test case for the topic: {topic}"})
        print("Request to OpenAI", conversation_tc)
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=conversation_tc
        )
        print("Response from OpenAI, total tokens:", response["usage"]["total_tokens"])
        test_cases = response["choices"][0]["message"]["content"]
        # The same text feeds both the "Test Case" output and the editable textbox on the next tab.
        return test_cases, test_cases
    else:
        # Both outputs expect a value, so return two strings.
        return "Please enter a topic/subject.", ""

def generate_test_scripts(ctx, framework, test_cases):
    # Override the default system prompts with the JSON context from the UI, if provided.
    global convo_py, convo_java
    if ctx:
        convo_py = json.loads(ctx)
        convo_java = json.loads(ctx)
    print("TEST SCRIPT", framework)
    if framework == "Python, unittest":
        print("py")
        return generate_python_unittest(test_cases)
    elif framework == "Java, JUnit":
        print("java")
        return generate_java_junit(test_cases)
    else:
        return "Unsupported language or framework."

def generate_python_unittest(test_cases):
    convo_py.append({"role": "user", "content": f"Here is a manual test case. {test_cases}"})
    # prompt = f"Create a Python unittest test script for the following test cases:\n{test_cases}"
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=convo_py
    )
    script = response["choices"][0]["message"]["content"]
    return script

def generate_java_junit(test_cases):
    convo_java.append({"role": "user", "content": f"Here is a manual test case. {test_cases}"})
    # prompt = f"Create a Java JUnit test script for the following test cases:\n{test_cases}"
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=convo_java
    )
    script = response["choices"][0]["message"]["content"]
    return script
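# A minimal usage sketch of the helpers above, outside the Gradio UI (illustrative only;
# it assumes OPENAI_API_KEY is set, and the topic string below is just an example):
#
#   ctx = json.dumps(conversation_tc)
#   cases, _ = generate_test_cases(ctx, "Login form validation")
#   print(generate_test_scripts(None, "Python, unittest", cases))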



'''
#####################
    Markdown Content
#####################
'''

title = """
# QA Leveraging GenAI
"""

description = '''
This tool leverages OpenAI's GPT-3.5-turbo model to automate two key aspects of quality assurance in software development: generating test cases and writing test scripts.

By providing a functional use case, you can generate detailed test cases to ensure the quality of your software.
You can also select your preferred testing framework, and the tool will generate test scripts based on the generated test cases.
<hr>
'''

howto = '''
<br>
## How to Use

**Test Case Generation**
1. Enter a functional use case: describe the functional use case for which you want to generate test cases. The more specific you are, the better the generated test cases will be.
2. Click the "Generate Test Case" button.

**Test Script Generation**
1. Select a test framework: choose the testing framework that the test scripts should be compatible with. Currently, you can select between Python's unittest and Java's JUnit frameworks.
2. Click the "Generate Test Script" button.

---
## Results

- **Test Case**: This section will display the generated test cases based on the functional use case you provided. 

- **Test Script**: After choosing your preferred language and testing framework, this section will display the test scripts corresponding to the generated test cases.
---
'''

notices = '''
**Notice**
The outputs of this tool are produced by an LLM and might not be perfect. It's best to review and edit the generated test cases and scripts as necessary.
'''


'''
#####################
    Gradio Block
#####################
'''
test_case_1 = None
test_case_2 = None

with gr.Blocks() as demo:

    
    gr.Markdown(title)
    gr.Markdown(description)

    with gr.Tab("1. Text Generation"):
        with gr.Accordion("Context", open=False):
            test_case_context = gr.Textbox(label='Context',
                value='[{"role": "system", "content": "You are a technical and professional QA manager, working in a technological firm. You provide test cases for a scenario."}]')
        with gr.Row():
            with gr.Column():
                test_case_topic = gr.Textbox(label='Functional Usecase', value='Robotic arm to test payments using card with driver, inspector and billing device')
                tc_button = gr.Button("Generate Test Case")
            with gr.Column():
                test_case_1 = gr.Textbox(label="Test Case")

    with gr.Tab("2. Test Script Generation"):
        with gr.Accordion("Context", open=False):
            test_script_context = gr.Textbox(label='Context',
                value='[{"role": "system", "content": "You are a technical and professional QA manager who specializes in Python unittest, working in a technological firm. You should provide Python unittest test scripts for the test case provided."}]')
        with gr.Row():
            with gr.Column():
                fw = gr.Dropdown(["Python, unittest", "Java, JUnit"], label="Framework")
                test_case_2 = gr.Textbox(label="Test Case", lines=20, interactive=True)
                ts_button = gr.Button("Generate Test Script")
            with gr.Column():
                test_script = gr.Code(label="Test Script",language="python")


        tc_button.click(fn=generate_test_cases, inputs=[test_case_context,test_case_topic], outputs=[test_case_1,test_case_2], show_progress=True)
        ts_button.click(fn=generate_test_scripts, inputs=[test_script_context, fw, test_case_2], outputs=test_script, show_progress=True)

    # gr.Markdown(howto)
    gr.Markdown(notices)

demo.queue(api_open=False, max_size=5).launch(debug=True)