from datasets import load_dataset

# Load the source knowledge base from the Hugging Face Hub.
dataset = load_dataset("Namitg02/Test")

print(dataset)

from langchain.docstore.document import Document as LangchainDocument
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Split the corpus into small, overlapping chunks for retrieval.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=100, chunk_overlap=15, separators=["\n\n", "\n", " ", ""]
)

# Wrap each dataset row in a LangChain Document before splitting.
# The "train" split and "text" column names are assumptions; adjust them to the dataset's schema.
raw_docs = [LangchainDocument(page_content=row["text"]) for row in dataset["train"]]
docs = splitter.split_documents(raw_docs)
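
# Optional sanity check (a sketch, not part of the original script): confirm
# the splitter produced chunks and preview the first one.
print(f"{len(docs)} chunks created")
print(docs[0].page_content[:100])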

from langchain_community.embeddings import HuggingFaceEmbeddings

# Sentence-transformers model used to embed each chunk.
embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")

from langchain_community.vectorstores import Chroma

persist_directory = 'docs/chroma/'

# Build a persistent Chroma vector store from the chunked documents.
vectordb = Chroma.from_documents(
    documents=docs,
    embedding=embedding_model,
    persist_directory=persist_directory,
)
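
# Optional sanity check (a sketch, not part of the original script): query the
# store directly before wiring up the retriever; the query text is illustrative.
for hit in vectordb.similarity_search("What causes diabetes?", k=2):
    print(hit.page_content[:100])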

question = "How can I reverse Diabetes?"

# Retriever that returns the two most similar chunks for a query.
retriever = vectordb.as_retriever(
    search_type="similarity", search_kwargs={"k": 2}
)
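
# Optional preview (a sketch, not part of the original script): fetch the chunks
# retrieved for the sample question; retriever.invoke assumes a recent LangChain version.
for doc in retriever.invoke(question):
    print(doc.page_content[:100])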

from transformers import pipeline
from langchain_community.llms import HuggingFacePipeline

llm_model = "HuggingFaceH4/zephyr-7b-beta"
# Text-generation pipeline for the LLM; retrieval is handled by the chain, not here.
pipe = pipeline(task="text-generation", model=llm_model, max_new_tokens=256)
# Wrap the transformers pipeline so it can be composed with LangChain prompts.
llm = HuggingFacePipeline(pipeline=pipe)

from langchain_core.messages import SystemMessage
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate

qa_chat_prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessage(
            content=(
                "You are a diabetes educator who provides advice to patients."
            )
        ),
        HumanMessagePromptTemplate.from_template("{context}\n\nQuestion: {question}"),
    ]
)

# Prompt -> LLM chain; the retrieved context and the question are filled in at invoke time.
chain = qa_chat_prompt | llm
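
# Example invocation (a sketch, not part of the original script): answer the
# sample question with the retrieved chunks as context.
context = "\n\n".join(doc.page_content for doc in retriever.invoke(question))
print(chain.invoke({"context": context, "question": question}))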

import gradio as gr

def answer(user_question):
    # Retrieve the most relevant chunks and pass them, with the question, to the chain.
    retrieved = "\n\n".join(doc.page_content for doc in retriever.invoke(user_question))
    return chain.invoke({"context": retrieved, "question": user_question})

ragdemo = gr.Interface(fn=answer, inputs="text", outputs="text")
ragdemo.launch()