File size: 1,104 Bytes
f19e9c2
a5c729e
 
 
f19e9c2
 
a5c729e
 
f19e9c2
a5c729e
 
f19e9c2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
from langchain.llms import CTransformers
# from langchain import PromptTemplate, LLMChain
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Hugging Face repo and quantized model file for the GGUF Llama-2 chat model.
# (Plain strings — the previous f-string prefixes had no placeholders.)
ggufmodel = "TheBloke/Llama-2-7B-Chat-GGUF"
ggufmodelfile = "llama-2-7b-chat.Q5_K_M.gguf"

# Local CTransformers-backed LLM; tokens are streamed to stdout as they are
# generated via the streaming callback handler.
llm = CTransformers(
    model=ggufmodel,
    model_file=ggufmodelfile,
    callbacks=[StreamingStdOutCallbackHandler()],
)


# Llama-2 chat prompt format: [INST] ... [/INST] with an embedded <<SYS>> system
# message. The original system sentence was garbled ("You are a helpful
# developed by ... assistant"); word order fixed so the model receives a
# coherent instruction.
template = """
[INST] <<SYS>>
You are a helpful, respectful and honest assistant developed by Mohammed Vasim. Your answers are always brief.
<</SYS>>
{text}[/INST]
"""

# Single-variable prompt: the user's message is substituted for {text}.
prompt = PromptTemplate(template=template, input_variables=["text"])

# llm_chain = LLMChain(prompt=prompt, llm=llm)

def build_chain(llm=llm, prompt=None, template=template):
    """Build and return an ``LLMChain`` wiring *prompt* to *llm*.

    Bug fix: the original accepted a ``prompt`` argument but unconditionally
    rebuilt it from ``template``, silently discarding any caller-supplied
    prompt. Now an explicitly passed prompt is honored; when ``prompt`` is
    None (the default, matching the old no-argument behavior) a fresh
    ``PromptTemplate`` is constructed from ``template``.

    Args:
        llm: The language model to run (defaults to the module-level ``llm``).
        prompt: Optional ready-made ``PromptTemplate``; built from ``template``
            when omitted.
        template: Template string used only when ``prompt`` is None.

    Returns:
        An ``LLMChain`` combining the prompt and the LLM.
    """
    if prompt is None:
        prompt = PromptTemplate(template=template, input_variables=["text"])
    return LLMChain(prompt=prompt, llm=llm)