File size: 1,180 Bytes
ebab1a2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 |
import os
# SECURITY NOTE(review): never hard-code an API token in source — load it from
# the real environment or a .env file; committing even an empty placeholder
# invites someone to paste a live credential here.
os.environ["HUGGINGFACEHUB_API_TOKEN"] = ""
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain_community.llms import HuggingFaceHub
# Prompt layout for the chain: the memory buffer is injected as {chat_history}
# (must match the ConversationBufferMemory memory_key) and the latest user
# input fills {question}.
template = """You are a friendly chatbot engaging in a conversation with a human.
Previous conversation:
{chat_history}
New human question: {question}
Response:"""
def get_pipeline(model_name, **model_kwargs):
    """Build a HuggingFaceHub text-generation LLM for ``model_name``.

    Parameters:
        model_name: repo id on the Hugging Face Hub
            (e.g. "mistralai/Mistral-7B-Instruct-v0.2").
        **model_kwargs: optional generation-kwarg overrides merged over the
            defaults below (e.g. temperature=0.7). Backward-compatible:
            calling with only ``model_name`` behaves exactly as before.

    Returns:
        A configured HuggingFaceHub LLM instance.
    """
    # Conservative defaults: low temperature for stable replies and a mild
    # repetition penalty so the model does not echo the chat history.
    kwargs = {
        "max_new_tokens": 250,
        "top_k": 30,
        "temperature": 0.1,
        "repetition_penalty": 1.03,
    }
    kwargs.update(model_kwargs)
    return HuggingFaceHub(
        repo_id=model_name,
        task="text-generation",
        model_kwargs=kwargs,
    )
# Wire the pieces together: hub-backed LLM, a buffer memory whose key matches
# the {chat_history} placeholder in the template, and the prompt itself.
chatbot = get_pipeline("mistralai/Mistral-7B-Instruct-v0.2")
memory = ConversationBufferMemory(memory_key="chat_history")
prompt_template = PromptTemplate.from_template(template)
# verbose=True makes LangChain log the fully-rendered prompt on every turn.
conversation = LLMChain(llm=chatbot, prompt=prompt_template, verbose=True, memory=memory)
# Interactive REPL: read a question, run it through the chain (memory carries
# prior turns), and print the generated text. Fixes over the original:
#  - Ctrl-D / Ctrl-C no longer crash with an uncaught EOFError/KeyboardInterrupt;
#  - typing "exit" or "quit" ends the session cleanly;
#  - the debug leftover that printed the raw response dict (immediately before
#    printing response["text"]) is removed.
while True:
    try:
        question = input("You: ")
    except (EOFError, KeyboardInterrupt):
        print()  # keep the shell prompt on a fresh line
        break
    if question.strip().lower() in {"exit", "quit"}:
        break
    response = conversation({"question": question})
    print("-" * 50)
    print(response["text"])
    print("-" * 50)
    print()