import gradio as gr
from langchain.chains import RetrievalQA
from langchain.document_loaders import TextLoader
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import HuggingFacePipeline
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

# Load FLAN-T5 locally and wrap it in a text2text-generation pipeline
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")

pipe = pipeline(
    "text2text-generation",
    model=model,
    tokenizer=tokenizer,
    max_length=512,
    do_sample=True,  # without this, temperature/top_p are ignored
    temperature=0.5,
    top_p=0.95,
    repetition_penalty=1.15,
)

# Expose the pipeline as a LangChain LLM and run a quick smoke test
local_llm = HuggingFacePipeline(pipeline=pipe)
print(local_llm('What is the capital of Syria?'))

# Load the source document, split it into 1000-character chunks,
# embed the chunks, and store the vectors in a local Chroma database
loader = TextLoader('info.txt')
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embedding = HuggingFaceInstructEmbeddings()
docsearch = Chroma.from_documents(texts, embedding, persist_directory='db')
docsearch.persist()  # older Chroma wrappers need an explicit persist to write to disk

# Build a RetrievalQA chain that answers from the top-3 matching chunks
retriever = docsearch.as_retriever(search_kwargs={"k": 3})
qa_chain = RetrievalQA.from_chain_type(llm=local_llm,
                                       chain_type="map_reduce",
                                       retriever=retriever,
                                       return_source_documents=True)

# One-off command-line check before launching the web UI
question = input('prompt: ')
result = qa_chain({'query': question})
print('result: ', result['result'])

def gradinterface(query, history):
    # gr.ChatInterface calls fn with (message, history); only the message is needed
    result = qa_chain({'query': query})
    return result['result']


demo = gr.ChatInterface(fn=gradinterface, title='OUR_OWN_BOT')

if __name__ == "__main__":
    demo.launch(show_api=False, share=True)