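# app.py: minimal RAG demo. Load the Namitg02/Test dataset, chunk and embed it
# into a Chroma store, and answer questions with zephyr-7b-beta via LangChain.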
from datasets import load_dataset
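# Load the knowledge base dataset from the Hugging Face Hub.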
dataset = load_dataset("Namitg02/Test")
print(dataset)

from langchain.text_splitter import RecursiveCharacterTextSplitter

# Split the serialized dataset into small overlapping chunks for retrieval.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=100, chunk_overlap=15, separators=["\n\n", "\n", " ", ""]
)
# create_documents expects a list of strings; without the list wrapper,
# str(dataset) would be iterated character by character.
docs = splitter.create_documents([str(dataset)])

from langchain_community.embeddings import HuggingFaceEmbeddings

# Sentence-transformers model used to embed both the chunks and the queries.
embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
from langchain_community.vectorstores import Chroma
persist_directory = 'docs/chroma/'
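# Build a persistent Chroma vector store from the chunked documents.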
vectordb = Chroma.from_documents(
documents=docs,
embedding=embedding_model,
persist_directory=persist_directory
)
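
# The store lives under persist_directory and can be reloaded later with
# Chroma(persist_directory=persist_directory, embedding_function=embedding_model).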
# Direct similarity search is also possible once a question is defined:
# docs_ss = vectordb.similarity_search(question, k=3)

# Optional: a custom RAG prompt instead of the hub prompt pulled below.
# from langchain.prompts import PromptTemplate
# template = """You are a helpful dietician. Use the following pieces of context
# to answer the question at the end. If you don't know the answer, just say that
# you don't know; don't try to make up an answer. Use three sentences maximum,
# keep the answer as concise as possible, and always say "thanks for asking!"
# at the end of the answer.
# {context}
# Question: {question}
# Helpful Answer:"""
# QA_CHAIN_PROMPT = PromptTemplate.from_template(template)

from langchain.memory import ConversationBufferMemory

# Conversation memory; not wired into the chain built below, but kept for a
# future ConversationalRetrievalChain setup.
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True
)

question = "How can I reverse Diabetes?"

# Retrieve the two most similar chunks per query.
retriever = vectordb.as_retriever(
    search_type="similarity", search_kwargs={"k": 2}
)
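# Quick sanity check (optional): inspect what the retriever returns.
# for d in retriever.get_relevant_documents(question):
#     print(d.page_content)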

from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain import hub
from langchain_community.llms import HuggingFaceEndpoint

READER_MODEL = "HuggingFaceH4/zephyr-7b-beta"
# create_stuff_documents_chain needs an LLM object, not a bare model id string,
# so wrap the Hub model in an endpoint client (assumes a valid Hugging Face
# token in the environment).
llm = HuggingFaceEndpoint(repo_id=READER_MODEL, max_new_tokens=512)

# Pull a standard retrieval-QA chat prompt from the LangChain hub and build
# the retrieval chain.
retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
combine_docs_chain = create_stuff_documents_chain(
    llm, retrieval_qa_chat_prompt
)
qa = create_retrieval_chain(retriever, combine_docs_chain)

# create_retrieval_chain returns a Runnable, so call .invoke with an "input" key.
result = qa.invoke({"input": question})
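# result is a dict with "input", "context", and "answer" keys.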

import gradio as gr

print(result["answer"])

# gr.load needs the model id variable (with a "models/" prefix for Hub models),
# not the literal string "READER_MODEL".
gr.load(f"models/{READER_MODEL}").launch()