Namitg02 commited on
Commit
4445973
·
verified ·
1 Parent(s): 1f8547b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -29
app.py CHANGED
@@ -56,25 +56,12 @@ retriever = vectordb.as_retriever(
56
  search_type="similarity", search_kwargs={"k": 2}
57
  )
58
 
59
- #from langchain.chains import RetrievalQA
60
- from langchain_core.prompts import ChatPromptTemplate
61
 
62
- from langchain.chains.combine_documents import create_stuff_documents_chain
63
- #from langchain import hub
64
- from langchain.chains import create_retrieval_chain
65
 
66
- #from langchain_community.llms import Ollama
67
- #READER_MODEL=Ollama(model="meta-llama/Meta-Llama-Guard-2-8B")
68
 
69
- from transformers import AutoModelForSeq2SeqLM, BitsAndBytesConfig
70
- lll_model = AutoModelForSeq2SeqLM.from_pretrained("unsloth/llama-3-8b-bnb-4bit",low_cpu_mem_usage=True,max_shard_size="1GB")
71
-
72
- #READER_MODEL = "HuggingFaceH4/zephyr-7b-beta"
73
-
74
- #qa = ConversationalRetrievalChain.from_llm(llm=READER_MODEL,retriever=retriever,memory=memory)
75
- #qa = RetrievalQA.from_chain_type(llm=READER_MODEL,retriever=retriever)
76
-
77
- #retrieval_qa_chat_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
78
  from langchain_core.messages import SystemMessage
79
  from langchain_core.prompts import HumanMessagePromptTemplate
80
 
@@ -89,17 +76,7 @@ qa_chat_prompt = ChatPromptTemplate.from_messages(
89
  ]
90
  )
91
 
92
- docs_chain = create_stuff_documents_chain(
93
- lll_model, qa_chat_prompt
94
- )
95
- retrieval_chain = create_retrieval_chain(retriever, docs_chain)
96
- response = retrieval_chain.invoke({"context": "how can I reverse diabetes?"})
97
- print(response["answer"])
98
-
99
-
100
- #result = qa(question)
101
- #import gradio as gr
102
- #gr.load("lll_model").launch()
103
 
104
- #result = ({"query": question})
105
- #print("qa")
 
56
  search_type="similarity", search_kwargs={"k": 2}
57
  )
58
 
 
 
59
 
60
from transformers import pipeline

# Hub repo id of the chat LLM used for generation.
llm_model = "HuggingFaceH4/zephyr-7b-beta"

# BUG FIX: the original call was
#     pipeline(task="text-generation",llm_model,retriever = retriever)
# which is a SyntaxError (positional argument after a keyword argument), and
# `retriever` is not a transformers.pipeline() parameter — retrieval is wired
# up separately through the LangChain chain, not the HF pipeline.
pipe = pipeline(task="text-generation", model=llm_model)
64
 
 
 
 
 
 
 
 
 
 
65
  from langchain_core.messages import SystemMessage
66
  from langchain_core.prompts import HumanMessagePromptTemplate
67
 
 
76
  ]
77
  )
78
 
79
+ # NOTE(review): `|` composition requires `pipe` to be a LangChain Runnable;
+ # a raw transformers pipeline is not one — presumably this needs wrapping
+ # (e.g. langchain HuggingFacePipeline(pipeline=pipe)). TODO confirm at runtime.
+ chain = qa_chat_prompt | pipe
 
 
 
 
 
 
 
 
 
 
80
 
81
import gradio as gr

# BUG FIX: gr.load() takes a Hugging Face Hub repo id, but the original passed
# "lll_model" — the *name* of a (since-deleted) local Python variable as a
# string, which resolves to no Space/model on the Hub. Load the demo for the
# same model the pipeline above is configured with; the "models/" prefix tells
# gradio it is a model repo rather than a Space.
gr.load("models/" + llm_model).launch()