KingLLM committed on
Commit
4974a6c
Β·
verified Β·
1 Parent(s): 8be7c76

Upload 8 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ data/Color_Atlas_of_Hematology__Practical_Microsc.pdf filter=lfs diff=lfs merge=lfs -text
37
+ data/Pathophysiology[[:space:]]of[[:space:]]Blood[[:space:]]Disorders[[:space:]]([[:space:]]PDFDrive[[:space:]]).pdf filter=lfs diff=lfs merge=lfs -text
38
+ vectorstore/db_faiss/index.faiss filter=lfs diff=lfs merge=lfs -text
chainlit.md ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Hematologist AI Assistant πŸ©ΈπŸ’‰
2
+
3
+ Welcome to the Hematologist AI Assistant! This AI assistant is designed to help users with inquiries related to hematology, including blood disorders, treatments, and medical advice.
4
+
5
+ ## Features
6
+ πŸ” **Question Answering:** Ask the AI assistant questions related to hematology, and it will provide relevant answers based on its knowledge base.
7
+
8
+ πŸ’¬ **Chat Interface:** Engage in a conversation with the AI assistant through a user-friendly chat interface.
9
+
10
+ πŸ“š **Knowledge Base:** The AI assistant is equipped with a comprehensive knowledge base on various hematological topics, ensuring accurate and helpful responses.
11
+
12
+ πŸ”’ **Privacy:** Your conversations with the AI assistant are kept private and confidential, adhering to strict privacy and security protocols.
13
+
14
+ ## How to Use
15
+ 1. **Accessing the AI Assistant:** Simply visit the web interface provided or integrate the AI assistant into your application using the provided API.
16
+
17
+ 2. **Asking Questions:** Type your questions or inquiries into the chat interface and press the send button. The AI assistant will process your query and provide a response.
18
+
19
+ 3. **Exploring Topics:** Feel free to explore different topics related to hematology by asking questions or engaging in conversations with the AI assistant.
20
+
21
+ ## Examples
22
+ - "What are the symptoms of anemia?"
23
+ - "How is leukemia diagnosed?"
24
+ - "Can you explain the treatment options for sickle cell disease?"
25
+ - "What are the risk factors for thrombocytopenia?"
26
+
27
+ ## Support
28
+ If you encounter any issues or have any questions about the Hematologist AI Assistant, please feel free to [contact us](mailto:[email protected]).
29
+
30
+
31
+ Happy chatting! πŸ©ΈπŸ’¬
data/Color_Atlas_of_Hematology__Practical_Microsc.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0aec234780c9ae39807c8c2e2da338ae863940fa8409a8e61c8119523af6c816
3
+ size 5609324
data/Pathophysiology of Blood Disorders ( PDFDrive ).pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83cb77dc79543e6f11cad423d6071d1cb5ca22fe5dab8965494720cad442419c
3
+ size 40315767
ingest.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_community.embeddings import HuggingFaceEmbeddings
2
+ from langchain_community.vectorstores import FAISS
3
+ from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
4
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
5
+
6
+ DATA_PATH = 'data/'
7
+ DB_FAISS_PATH = 'vectorstore/db_faiss'
8
+
9
# Create vector database
def create_vector_db(data_path=DATA_PATH, db_faiss_path=DB_FAISS_PATH):
    """Build and persist a FAISS vector store from the PDFs in *data_path*.

    Parameters
    ----------
    data_path : str
        Directory scanned for ``*.pdf`` files (defaults to ``DATA_PATH``).
    db_faiss_path : str
        Directory where the FAISS index is saved via ``save_local``
        (defaults to ``DB_FAISS_PATH``).
    """
    loader = DirectoryLoader(data_path,
                             glob='*.pdf',
                             loader_cls=PyPDFLoader)
    documents = loader.load()

    # 500-char chunks with a 50-char overlap keep passages small enough for
    # retrieval while preserving context across chunk boundaries.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,
                                                   chunk_overlap=50)
    texts = text_splitter.split_documents(documents)

    # Small, fast sentence encoder; explicitly pinned to CPU.
    embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
                                       model_kwargs={'device': 'cpu'})

    db = FAISS.from_documents(texts, embeddings)
    db.save_local(db_faiss_path)

if __name__ == "__main__":
    create_vector_db()
28
+
llama-2-7b-chat.ggmlv3.q8_0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bfdde943555c78294626a6ccd40184162d066d39774bd2c98dae24943d32cc3
3
+ size 7160799872
model.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain_community.document_loaders import PyPDFLoader, DirectoryLoader
2
+ from langchain.prompts import PromptTemplate
3
+ from langchain_community.embeddings import HuggingFaceEmbeddings
4
+ from langchain_community.vectorstores import FAISS
5
+ from langchain_community.llms import CTransformers
6
+ from langchain.chains import RetrievalQA
7
+ from sentence_transformers import SentenceTransformer
8
+ import chainlit as cl
9
+
10
# Path of the persisted FAISS index (produced by ingest.py's save_local call).
DB_FAISS_PATH = 'vectorstore/db_faiss'

# Prompt used by the RetrievalQA chain: {context} receives the retrieved
# document chunks, {question} receives the user's query.
custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.

Context: {context}
Question: {question}

Only return the helpful answer below and nothing else.
Helpful answer:
"""
21
+
22
def set_custom_prompt():
    """Build the PromptTemplate used for QA retrieval over the vectorstore."""
    return PromptTemplate(
        template=custom_prompt_template,
        input_variables=['context', 'question'],
    )
29
+
30
# Retrieval QA Chain
def retrieval_qa_chain(llm, prompt, db):
    """Wire the LLM, prompt and vector store into a RetrievalQA chain.

    The chain "stuffs" the top-2 retrieved chunks into the prompt and returns
    the source documents alongside the generated answer.
    """
    retriever = db.as_retriever(search_kwargs={'k': 2})
    return RetrievalQA.from_chain_type(
        llm=llm,
        chain_type='stuff',
        retriever=retriever,
        return_source_documents=True,
        chain_type_kwargs={'prompt': prompt},
    )
39
+
40
# Loading the model
def load_llm():
    """Instantiate the Llama-2 7B chat model through ctransformers."""
    # Near-zero temperature keeps the answers close to deterministic.
    config = dict(
        model="TheBloke/Llama-2-7B-Chat-GGML",
        model_type="llama",
        max_new_tokens=1024,
        temperature=0.01,
    )
    return CTransformers(**config)
50
+
51
# QA Model Function
def qa_bot():
    """Assemble the full pipeline: embeddings -> FAISS store -> LLM -> QA chain."""
    embeddings = HuggingFaceEmbeddings(
        model_name='sentence-transformers/all-MiniLM-L6-v2',
        model_kwargs={'device': 'cpu'},
    )
    # allow_dangerous_deserialization is needed to unpickle the local index;
    # acceptable here since the index is produced by our own ingest step.
    db = FAISS.load_local(DB_FAISS_PATH, embeddings,
                          allow_dangerous_deserialization=True)
    return retrieval_qa_chain(load_llm(), set_custom_prompt(), db)
61
+
62
# output function
def final_result(query):
    """Run *query* through a freshly built QA chain and return the raw response dict."""
    chain = qa_bot()
    return chain({'query': query})
67
+
68
# chainlit code
@cl.on_chat_start
async def start():
    """Build the QA chain once per chat session and greet the user."""
    chain = qa_bot()
    msg = cl.Message(content="Starting the bot...")
    await msg.send()
    msg.content = "Hi, Welcome to Medical Bot. What is your query?"
    await msg.update()
    # Stash the chain so the on_message handler can reuse it this session.
    cl.user_session.set("chain", chain)
78
+
79
+
80
@cl.on_message
async def main(message: cl.Message):
    """Answer an incoming chat message using the session's QA chain."""
    chain = cl.user_session.get("chain")
    # Stream the final answer; answer_reached=True forces streaming even
    # before the "FINAL ANSWER" prefix tokens are observed.
    cb = cl.AsyncLangchainCallbackHandler(
        stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"]
    )
    cb.answer_reached = True
    res = await chain.acall(message.content, callbacks=[cb])
    # res["source_documents"] also carries the retrieved chunks if needed.
    answer = res["result"]
    await cl.Message(content=answer).send()
vectorstore/db_faiss/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ca20520da7c0a6bee9159af1d93a28f1509708c0ea46a9f2e7267e0862c0db4
3
+ size 4457517
vectorstore/db_faiss/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ef6aec5c8f60cb7c723cf9b0ad176351b20a02c7bf353ad706ce7d469b909cf
3
+ size 1499176