File size: 2,578 Bytes
26175df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
import os
import subprocess
from dotenv import load_dotenv
load_dotenv()
try:
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = os.getenv("HUGGINGFACEHUB_API_TOKEN")
    PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
except:
    PINECONE_API_KEY = subprocess.check_output(["bash", "-c", "echo ${{ secrets.PINECONE_API_KEY }}"]).decode("utf-8").strip()


from langchain.embeddings import HuggingFaceEmbeddings
import pinecone
import torch
from langchain import PromptTemplate, LLMChain,HuggingFacePipeline
from langchain.vectorstores import Pinecone
from langchain.chains.question_answering import load_qa_chain
from langchain.chains import RetrievalQA
from transformers import pipeline

def get_llm(model_name,pinecone_index,llm):
    # model_name = "bert-large-uncased" #"t5-large"
    model_kwargs = {'device': 'cuda' if torch.cuda.is_available() else 'cpu'}

    embeddings = HuggingFaceEmbeddings(model_name=model_name, model_kwargs=model_kwargs)


    pinecone.init(
        api_key=PINECONE_API_KEY,
        environment="us-east-1-aws"
    )

    index = pinecone.Index(pinecone_index)
    print(index.describe_index_stats())

    docsearch = Pinecone(index, embeddings.embed_query,"text")

    # print("About to load the model")

    instruct_pipeline = pipeline(model=llm, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto", 
        return_full_text=True, do_sample=False, max_new_tokens=128)
    llm = HuggingFacePipeline(pipeline=instruct_pipeline)
    # print("Loaded the LLM")

    # print("Prompting")

    template = """Context: {context}

    Question: {question}

    Answer: Let's go step by step."""

    prompt = PromptTemplate(template=template, input_variables=["question","context"])
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    return llm_chain, docsearch


if __name__ == "__main__":
    model_name = "bert-large-uncased"
    pinecone_index = "bert-large-uncased"
    llm = "databricks/dolly-v2-3b"
    llm_chain, docsearch = get_llm(model_name,pinecone_index,llm)
    print(":"*40)
    questions = ["what is the name of the first Hindi newspaper published in Bihar?",
                "what is the capital of Bihar?",
                "Brief about the Gupta Dynasty"]
    for question in questions:
        context = docsearch.similarity_search(question, k=3,metadata=False)
        content = ""
        for i in context:
            content= content + f"{i.__dict__['page_content']}"
        print(f"{question}")
        response = llm_chain.predict(question=question,context=content)
        print(f"{response}\n{'--'*25}")