VermaPankaj123 committed on
Commit 79826d3 · verified · 1 Parent(s): 67157ac

Delete app.py

Files changed (1)
  1. app.py +0 -92
app.py DELETED
@@ -1,92 +0,0 @@
- import os
- import gradio as gr
- from dotenv import load_dotenv
- from langchain_community.embeddings import HuggingFaceEmbeddings
- from langchain_community.vectorstores import FAISS
- from langchain_community.llms import HuggingFaceHub
- from langchain_core.documents import Document
- from langchain.prompts import PromptTemplate
- from langchain.chains import LLMChain
- from langchain_community.tools.tavily_search.tool import TavilySearchResults
-
- # Load environment variables
- load_dotenv()
- HUGGINGFACEHUB_API_TOKEN = os.getenv("HF_TOKEN")
- TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
-
- # Set API keys
- os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
- os.environ["TAVILY_API_KEY"] = TAVILY_API_KEY
-
- # Prompt Template
- prompt_template = PromptTemplate(
-     input_variables=["context", "user_story"],
-     template="""You are a QA expert. Based on the context below and the given user story, write a detailed list of test cases.
-
- Context:
- {context}
-
- User Story:
- {user_story}
-
- Test Cases:"""
- )
-
- # Load knowledge from RAG (local file)
- def load_rag_knowledge():
-     with open("rag_knowledge_base.txt", "r", encoding="utf-8") as file:
-         content = file.read()
-     docs = [Document(page_content=content)]
-     embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
-     vector_store = FAISS.from_documents(docs, embeddings)
-     return vector_store.similarity_search("test case generation", k=1)
-
- # Tavily search
- def tavily_search(query):
-     search = TavilySearchResults(max_results=1)  # TavilySearchResults takes max_results, not k
-     results = search.run(query)
-     return results[0]['content'] if results else "No relevant results from Tavily."
-
- # LLM call with combined context
- def call_llm_with_context(context, user_story):
-     llm = HuggingFaceHub(
-         repo_id="mistralai/Mistral-7B-Instruct-v0.3",
-         model_kwargs={"temperature": 0.7, "max_new_tokens": 500}
-     )
-     chain = LLMChain(llm=llm, prompt=prompt_template)
-     return chain.run({"context": context, "user_story": user_story})
-
- # Generate test cases pipeline
- def generate_test_cases(user_story):
-     rag_docs = load_rag_knowledge()
-     rag_text = "\n".join([doc.page_content for doc in rag_docs])
-     tavily_text = tavily_search(user_story)
-     full_context = f"{rag_text}\n\n{tavily_text}"
-     test_cases = call_llm_with_context(full_context, user_story)
-     return rag_text.strip(), tavily_text.strip(), test_cases.strip()
-
- # Gradio handler
- def handle_generate(user_story):
-     rag, tavily, result = generate_test_cases(user_story)
-     return rag, tavily, result
-
- # ----------------- Gradio UI -----------------
- with gr.Blocks() as demo:
-     gr.Markdown("# 🧪 TechTales TestCaseGenerator using RAG + Tavily + Mistral + LangChain - Developed by Pankaj Kumar")
-     gr.Markdown("🚀 Enter a user story below to generate test cases using your knowledge base and Tavily search.")
-
-     user_input = gr.Textbox(label="📝 Enter User Story", lines=4, placeholder="As a user, I want to...")
-
-     btn = gr.Button("🔍 Generate Test Cases")
-
-     rag_output = gr.Textbox(label="📚 Knowledge from RAG File", lines=8)
-     tavily_output = gr.Textbox(label="🌐 Knowledge from Tavily Search", lines=8)
-     result_output = gr.Textbox(label="✅ Final Test Cases", lines=12)
-
-     btn.click(
-         handle_generate,
-         inputs=[user_input],
-         outputs=[rag_output, tavily_output, result_output]
-     )
-
- demo.launch()