import os

from dotenv import load_dotenv

# Load environment variables from the .env file
load_dotenv()

# Read OPENAI_API_KEY and fail fast if it is missing; assigning None back
# into os.environ would otherwise raise a TypeError here
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
if OPENAI_API_KEY is None:
    raise RuntimeError("OPENAI_API_KEY is not set in the environment or .env file")
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
from langchain_openai import ChatOpenAI

llm_OpenAi = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

# ChatAnyscale now lives in langchain_community.chat_models
from langchain_community.chat_models import ChatAnyscale

ANYSCALE_ENDPOINT_TOKEN = os.environ.get("ANYSCALE_ENDPOINT_TOKEN")
anyscale_api_key = ANYSCALE_ENDPOINT_TOKEN
llm = ChatAnyscale(
    anyscale_api_key=anyscale_api_key,
    temperature=0,
    model_name="mistralai/Mistral-7B-Instruct-v0.1",
    streaming=False,
)
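# A minimal sanity check, not in the original script: both chat models
# implement LangChain's Runnable interface, so a one-off invoke() call
# confirms the API keys work before the heavier RAG setup below.
# (Uncomment to run; each call costs one short completion.)
# print(llm_OpenAi.invoke("Reply with the single word: pong").content)
# print(llm.invoke("Reply with the single word: pong").content)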
## Create embeddings and splitter
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Create embeddings (BGE-large, with normalized vectors for cosine similarity)
model_name = "BAAI/bge-large-en"
embedding = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    # model_kwargs={'device': 'cuda'},
    encode_kwargs={"normalize_embeddings": True},
)
# Create splitter: ~1000-character chunks with 100 characters of overlap
splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=100,
)
from langchain_community.vectorstores import FAISS

persist_directory = "./faiss_Test02_500_C_BGE_large"
# persist_directory = "./faiss_V03_C500_BGE_large-final"
# persist_directory = "./faiss_V03_C1000_BGE_large-final"

# Load the pre-built FAISS index. Note: newer langchain_community releases
# also require allow_dangerous_deserialization=True here, because the index
# is unpickled from disk.
vectorstore = FAISS.load_local(persist_directory, embedding)
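# The directory above is assumed to hold an index built offline. A minimal
# sketch of how such an index could be created with the splitter and
# embeddings defined earlier; the source file name is a placeholder:
# from langchain_core.documents import Document
# docs = [Document(page_content=open("atrad_user_manual.txt").read())]
# chunks = splitter.split_documents(docs)
# FAISS.from_documents(chunks, embedding).save_local(persist_directory)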
# Define a custom prompt for the user manual
from langchain.prompts import PromptTemplate

qa_template = ("""
You are the AI assistant of IronOne Technologies, which provides services for company members and novice users learning the ATrad Application.
You have been provided with context information below related to the learning material.
Context: {context}
Given this information, please answer the question with the latest information.
If you don't know the answer, say you don't know; don't try to make up answers.
If the context is not enough to answer the question, ask for more information.
If the context is not related to the question, say "I don't know."
Give the answer with a very clear structure and clear language.
Each answer must start with the code word ATrad Ai(QA):
Question: {question}
Answer: Let me think about it...""")
qa_template2 = ("""
Welcome to IronOne Technologies' AI Assistant, designed to assist you in learning with the ATrad Application.
Context: {context}
As your AI assistant, I'm here to help you navigate through learning materials and provide guidance.
Please provide me with any questions or concerns you have regarding the ATrad Application.
If you're unsure about something or need more information, feel free to ask.
Important:
- Do not mention the provided document; give answers directly.
- Give answers in a very structured manner so they are easy to understand.
Each answer must start with the code word ATrad Ai(QA):
Question: {question}
ATrad Ai(QA): Let me think about it...""")
QA_PROMPT = PromptTemplate(
    input_variables=["context", "question"],
    template=qa_template2,
)
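# A small usage note (assumed, not in the original): the template can be
# rendered directly to inspect the exact prompt the chain will send.
# print(QA_PROMPT.format(context="<retrieved chunks>", question="How do I log in?"))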
# Chain for Web
from langchain.chains import RetrievalQA

Web_qa = RetrievalQA.from_chain_type(
    llm=llm_OpenAi,
    chain_type="stuff",
    retriever=vectorstore.as_retriever(search_kwargs={"k": 4}),
    return_source_documents=True,
    input_key="question",
    chain_type_kwargs={"prompt": QA_PROMPT},
)
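# A minimal usage sketch, not in the original script; the question string is
# made up. With return_source_documents=True and input_key="question", the
# chain returns a dict with the answer under "result" plus the retrieved chunks.
# response = Web_qa.invoke({"question": "How do I place a buy order in ATrad?"})
# print(response["result"])
# for doc in response["source_documents"]:
#     print(doc.metadata)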