NEXAS committed
Commit 7f989bf · verified · 1 Parent(s): f691e8f

Update src/utils/text_qa.py

Files changed (1)
  1. src/utils/text_qa.py +91 -91
src/utils/text_qa.py CHANGED
@@ -1,91 +1,91 @@
- import os
- from typing import List
- from langchain_groq import ChatGroq
- from langchain.prompts import PromptTemplate
- from langchain_community.vectorstores.qdrant import Qdrant
- from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
- from qdrant_client import QdrantClient
- #from langchain_community.chat_models import ChatOllama
-
-
- #import chainlit as cl
- from langchain.chains import RetrievalQA
-
- # bring in our GROQ_API_KEY
- from dotenv import load_dotenv
- load_dotenv()
-
-
- llamaparse_api_key = os.getenv("LLAMA_CLOUD_API_KEY")
- qdrant_url = os.getenv("QDRANT_URL ")
- qdrant_api_key = os.getenv("QDRANT_API_KEY")
- groq_api_key = os.getenv("GROQ_API_KEY")
-
- custom_prompt_template = """Use the following pieces of information to answer the user's question.
- If you don't know the answer, just say that you don't know,if it is out of context say that it is out of context and also try to provide the answer and don't be rude.
-
- Context: {context}
- Question: {question}
-
- Only return the helpful answer below and nothing else.
- Helpful answer:
- """
-
- def set_custom_prompt():
-     """
-     Prompt template for QA retrieval for each vectorstore
-     """
-     prompt = PromptTemplate(template=custom_prompt_template,
-                             input_variables=['context', 'question'])
-     return prompt
-
-
- chat_model = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768",api_key=groq_api_key) # type: ignore
- #chat_model = ChatGroq(temperature=0, model_name="Llama2-70b-4096")
- #chat_model = ChatOllama(model="llama2", request_timeout=30.0)
-
- #client = QdrantClient(api_key=qdrant_api_key, url=qdrant_url,)
-
-
- def retrieval_qa_chain(llm, prompt, vectorstore):
-     qa_chain = RetrievalQA.from_chain_type(
-         llm=llm,
-         chain_type="stuff",
-         retriever=vectorstore.as_retriever(search_kwargs={'k': 2}),
-         return_source_documents=True,
-         chain_type_kwargs={'prompt': prompt}
-     )
-     return qa_chain
-
-
- def qa_bot(qdrant):
-     embeddings = FastEmbedEmbeddings() # type: ignore
-     vectorstore = qdrant
-     llm = chat_model
-     qa_prompt=set_custom_prompt()
-     qa = retrieval_qa_chain(llm, qa_prompt, vectorstore)
-     return qa
-
- #---------------------------------------------------------------------#
-
- #qdrant_cloud_api_key="your_qdrant_cloud_api_key"
- #qdrant_url="your_qdrant_url"
-
- #qdrant_cloud = Qdrant.from_documents(
- # docs,
- # embeddings,
- # url=qdrant_url,
- # prefer_grpc=True,
- # api_key=qdrant_cloud_api_key,
- # collection_name="qdrant_cloud_documents",
- #)
-
- #---------------------------------------------------------------------#
- #query="how to make coffee"
- #print(query)
-
- #chain = qa_bot()
- #response = chain.invoke(query,)
-
- #print(response['result'])
-

+ import os
+ from typing import List
+ from langchain_groq import ChatGroq
+ from langchain.prompts import PromptTemplate
+ from langchain_community.vectorstores.qdrant import Qdrant
+ from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
+ from qdrant_client import QdrantClient
+ #from langchain_community.chat_models import ChatOllama
+
+
+ #import chainlit as cl
+ from langchain.chains import RetrievalQA
+
+ # bring in our GROQ_API_KEY
+ from dotenv import load_dotenv
+ load_dotenv()
+
+
+ llamaparse_api_key = os.getenv("LLAMA_CLOUD_API_KEY")
+ qdrant_url = os.getenv("QDRANT_URL ")
+ qdrant_api_key = os.getenv("QDRANT_API_KEY")
+ groq_api_key = os.getenv("GROQ_API_KEY")
+
+ custom_prompt_template = """Use the following pieces of information to answer the user's question.
+ If you don't know the answer, just say that you don't know,if it is out of context say that it is out of context and also try to provide the answer and don't be rude.
+
+ Context: {context}
+ Question: {question}
+
+ Only return the helpful answer below and nothing else.
+ Helpful answer:
+ """
+
+ def set_custom_prompt():
+     """
+     Prompt template for QA retrieval for each vectorstore
+     """
+     prompt = PromptTemplate(template=custom_prompt_template,
+                             input_variables=['context', 'question'])
+     return prompt
+
+
+ chat_model = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768",api_key=groq_api_key) # type: ignore
+ #chat_model = ChatGroq(temperature=0, model_name="Llama2-70b-4096")
+ #chat_model = ChatOllama(model="llama2", request_timeout=30.0)
+
+ #client = QdrantClient(api_key=qdrant_api_key, url=qdrant_url,)
+
+
+ def retrieval_qa_chain(llm, prompt, vectorstore):
+     qa_chain = RetrievalQA.from_chain_type(
+         llm=llm,
+         chain_type="stuff",
+         retriever=vectorstore.as_retriever(search_kwargs={'k': 2}),
+         return_source_documents=True,
+         chain_type_kwargs={'prompt': prompt}
+     )
+     return qa_chain
+
+
+ def qa_bot(qdrant):
+     embeddings = FastEmbedEmbeddings() # type: ignore
+     vectorstore = qdrant
+     llm = chat_model
+     qa_prompt=set_custom_prompt()
+     qa = retrieval_qa_chain(llm, qa_prompt, vectorstore)
+     return qa
+
+ #---------------------------------------------------------------------#
+
+ #qdrant_cloud_api_key="your_qdrant_cloud_api_key"
+ #qdrant_url="your_qdrant_url"
+
+ #qdrant_cloud = Qdrant.from_documents(
+ # docs,
+ # embeddings,
+ # url=qdrant_url,
+ # prefer_grpc=True,
+ # api_key=qdrant_cloud_api_key,
+ # collection_name="qdrant_cloud_documents",
+ #)
+
+ #---------------------------------------------------------------------#
+ #query="how to make coffee"
+ #print(query)
+
+ chain = qa_bot()
+ #response = chain.invoke(query,)
+
+ #print(response['result'])
+
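
For reference, here is a minimal usage sketch for this module. It assumes an already-populated Qdrant collection named "qdrant_cloud_documents" (the name used in the commented-out Qdrant.from_documents example) and the same .env variables the module reads; the src.utils.text_qa import path is likewise an assumption from this repo's layout. Two caveats: the module reads os.getenv("QDRANT_URL ") with a trailing space inside the key, so the environment variable must be set under that exact name unless the typo is fixed; and the newly uncommented module-level chain = qa_bot() omits the required qdrant argument, so importing the module as committed will raise a TypeError. Assuming that line is given an argument or commented back out, a caller would wire things up roughly like this:

import os

from dotenv import load_dotenv
from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
from langchain_community.vectorstores.qdrant import Qdrant
from qdrant_client import QdrantClient

from src.utils.text_qa import qa_bot  # assumed import path for this repo

load_dotenv()

# Connect to an existing, already-indexed collection; the collection name
# here is an assumption carried over from the commented-out example above.
client = QdrantClient(url=os.getenv("QDRANT_URL"), api_key=os.getenv("QDRANT_API_KEY"))
vectorstore = Qdrant(
    client=client,
    collection_name="qdrant_cloud_documents",
    embeddings=FastEmbedEmbeddings(),
)

chain = qa_bot(vectorstore)  # qa_bot requires the vectorstore argument
response = chain.invoke({"query": "how to make coffee"})  # RetrievalQA's input key is "query"
print(response["result"])  # the answer; response["source_documents"] holds the k=2 retrieved chunks

Taking the vectorstore as a parameter rather than building it at import time keeps qa_bot reusable against any collection, which is presumably why the function signature already expects it.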