from datasets import load_dataset
dataset = load_dataset("Namitg02/Test")
print(dataset)
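# load_dataset returns a DatasetDict keyed by split. A quick sanity check,
# assuming this dataset has the usual "train" split (an assumption about Namitg02/Test):
# print(dataset["train"][0])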

from langchain.docstore.document import Document as LangchainDocument

#RAW_KNOWLEDGE_BASE = [LangchainDocument(page_content=["dataset"])]

from langchain.text_splitter import RecursiveCharacterTextSplitter
splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=15, separators=["\n\n", "\n", " ", ""])
#docs = splitter.split_documents(RAW_KNOWLEDGE_BASE)
docs = splitter.create_documents([str(dataset)])  # create_documents expects a list of texts; a bare string is split into characters
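# Note that str(dataset) embeds the DatasetDict repr, not the rows themselves.
# A sketch of a more faithful source, assuming a "train" split whose text lives
# in a column named "text" (both names are guesses about this dataset):
# texts = [row["text"] for row in dataset["train"]]
# docs = splitter.create_documents(texts)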


from langchain_community.embeddings import HuggingFaceEmbeddings
embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
# embeddings = embedding_model.encode(docs)


from langchain_community.vectorstores import Chroma
persist_directory = 'docs/chroma/'

vectordb = Chroma.from_documents(
    documents=docs,
    embedding=embedding_model,
    persist_directory=persist_directory
)
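# Chroma writes the index under persist_directory, so a later run could reload
# it without re-embedding (a sketch, not exercised below):
# vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding_model)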

#docs_ss = vectordb.similarity_search(question,k=3)

# Create placeholders for the login form widgets using st.empty()
#user_input_placeholder = st.empty()
#pass_input_placeholder = st.empty()

#from langchain_community.output_parsers.rail_parser import GuardrailsOutputParser
from langchain.prompts import PromptTemplate

template = """You are a helpful dietician. Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer.
{context}
Question: {question}
Helpful Answer:"""

#QA_CHAIN_PROMPT = PromptTemplate.from_template(template)


from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True
)
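# "chat_history" is the input key ConversationalRetrievalChain expects by default,
# and return_messages=True keeps turns as message objects rather than one flat string.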

question = "How can I reverse Diabetes?"
#print("template")

retriever = vectordb.as_retriever(
    search_type="similarity", search_kwargs={"k": 2}
)


from transformers import pipeline
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_core.messages import SystemMessage
from langchain_core.prompts import HumanMessagePromptTemplate
from langchain_core.prompts import ChatPromptTemplate

print("check1")


template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Use three sentences maximum. Keep the answer as concise as possible. Always say "thanks for asking!" at the end of the answer. 
{context}
Question: {question}
Helpful Answer:"""

QA_CHAIN_PROMPT = PromptTemplate(input_variables=["context", "question"], template=template)
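# The rendered prompt can be inspected before wiring it into a chain, e.g.
# (placeholder context string, purely illustrative):
# print(QA_CHAIN_PROMPT.format(context="(retrieved chunks)", question="How can I reverse diabetes?"))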


# qa_chat_prompt = ChatPromptTemplate.from_messages(
#     [
#         SystemMessage(
#             content="You are a diabetes educator that provides advice to patients."
#         ),
#         HumanMessagePromptTemplate.from_template("{context}"),
#     ]
# )

llm_model = "microsoft/Phi-3-mini-4k-instruct"
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained(llm_model, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(llm_model, trust_remote_code=True)
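# Phi-3-mini is ~3.8B parameters, so a CPU load is slow. A hedged variant for
# GPU machines (requires the accelerate package; not used below):
# model = AutoModelForCausalLM.from_pretrained(llm_model, trust_remote_code=True, device_map="auto")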
#llm = HuggingFaceLLM(
#    tokenizer_name="microsoft/Phi-3-mini-4k-instruct",
#    model_name="microsoft/Phi-3-mini-4k-instruct",
#)

question = "How can I reverse diabetes?"

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256, do_sample=True, temperature=0.2)
llm = HuggingFacePipeline(pipeline=pipe)  # wrap the pipeline so the LangChain chain below can call it
docs1 = retriever.get_relevant_documents(question)
print(docs1)

from langchain.chains.question_answering import load_qa_chain

#pipe = load_qa_chain(llm=llm_model,tokenizer =tokenizer, chain_type="map_reduce")
print("check2")
qa = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT},  # from_llm takes combine_docs_chain_kwargs, not chain_type_kwargs
)

#question = "How can I reverse diabetes?"
result = qa({"question": question})
print(result["answer"])  # the chain returns a dict; the generated text is under "answer"
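# Because the chain carries memory, a follow-up call such as
# qa({"question": "What should I eat instead?"}) would be condensed against the
# stored chat_history before retrieval (illustrative question, not from the original).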

#"question-answering", "conversational"

print("check3")
context = "Diabetes remission or reversal is a condition when a person’s HbA1c is less than 6.5% for 3 months or more without diabetes medication. For diabetes remission or reversal, people should follow the advice of their doctors and nutritionist. Weight reduction is the key point for diabetes remission or reversal, as we all know that one of the leading causes of developing diabetes is obesity and more than 82 percent are overweight. But remission does not mean that diabetes has gone away."
# A text-generation pipeline takes a prompt string, not question/context kwargs,
# so render the QA prompt first.
chain = pipe(QA_CHAIN_PROMPT.format(context=context, question="How can I reverse diabetes?"))
print("check3A")
print(chain[0]["generated_text"])  # print() returns None, so its result cannot be indexed
print("check3B")
import gradio as gr
#ragdemo = gr.load("models/HuggingFaceH4/zephyr-7b-beta")
ragdemo = gr.Interface.from_pipeline(pipe)  # from_pipeline expects the transformers pipeline itself, not its output

print("check4")
ragdemo.launch()
print("check5")