# Commit 10110b5 (ritepaul): Add initial app and dependencies
from llama_cpp.llama import Llama
import streamlit as st
# Load Model.
# Streamlit re-executes this whole script on every user interaction, so the
# load is wrapped in an @st.cache_resource function: the GGUF weights are
# downloaded and initialised once per server process instead of on every rerun.
@st.cache_resource
def _load_model():
    """Download (if needed) and initialise the quantised Phi-3-mini model.

    Returns:
        A ready-to-use ``Llama`` instance.

    Raises:
        Exception: propagated from ``Llama.from_pretrained`` when the download
        or initialisation fails; handled by the caller below.
    """
    return Llama.from_pretrained(
        repo_id="microsoft/Phi-3-mini-4k-instruct-gguf",
        filename="*q4.gguf",  # glob picks the 4-bit quantised file from the repo
        n_gpu_layers=0,       # CPU-only inference
        n_ctx=2048,           # context window size
        verbose=False,
    )

try:
    llm = _load_model()
except Exception as e:
    # Surface the failure in the UI; downstream code checks for llm is None.
    st.error(f"Error loading model: {e}")
    llm = None
# Streamlit App — page header and input widget.
# NOTE: Streamlit renders widgets in execution order, so these calls define
# the page layout top-to-bottom.
st.title("JUnit Test Case Generator")
st.write("Generate JUnit test cases for Java Method using Generative AI.")
# Text Area for Input: the Java method source the user wants tests for.
java_method = st.text_area("Enter Java Method", height=300)
# Generate Button — Streamlit reruns the script on click and this branch fires.
# Guard clauses handle the two failure modes (no model, empty input) before
# attempting generation.
if st.button("Generate JUnit Test Cases"):
    if not llm:
        # Model failed to load at startup.
        st.error("Model not loaded. Please check the setup.")
    elif not java_method.strip():
        # Nothing (or only whitespace) was entered.
        st.warning("Please enter a valid Java Method.")
    else:
        prompt = f"Write JUnit test cases for this function. Provide only the Java code without any explanation.\n\n{java_method}"
        try:
            # Ask the model for the test cases.
            reply = llm.create_chat_completion(
                messages=[{"role": "user", "content": prompt}],
                response_format={"type": "text"},
                temperature=0.3,  # low temperature for deterministic-ish code
                top_k=30,
            )
            generated = reply["choices"][0]["message"]["content"]
            st.text_area("Generated Test Cases", generated, height=300)
        except Exception as err:
            st.error(f"Error generating test cases: {err}")