# AI Chatbot — Streamlit app backed by a Hugging Face inference endpoint
# (Mixtral-8x7B-Instruct) wired through LangChain with conversation memory.
import os

import streamlit as st
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
# --- Hugging Face endpoint setup ---
# Read the API token from the environment and fail fast with a visible error
# if it is missing (the original `os.environ[...] = None` would raise a
# TypeError deep inside startup instead).
sec_key = os.getenv('HUGGINGFACE_API_TOKEN')
if not sec_key:
    st.error("HUGGINGFACE_API_TOKEN environment variable is not set.")
    st.stop()
# langchain_huggingface / huggingface_hub authenticate via
# HUGGINGFACEHUB_API_TOKEN, so export the token under that name.
os.environ['HUGGINGFACEHUB_API_TOKEN'] = sec_key

# Instruction-tuned Mixtral model served through the HF Inference API.
repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
llm = HuggingFaceEndpoint(repo_id=repo_id, temperature=0.7)
# Prompt fed to the model each turn: the accumulated conversation history
# followed by the user's latest message. (The scraped " | |" artifacts were
# inside the triple-quoted string, corrupting the runtime prompt — removed.)
template = """The following is a conversation between a user and an AI assistant.
history:{history}
Final Message by Human: {user_input}
Final Message by AI: """

prompt = PromptTemplate(
    template=template,
    input_variables=["history", "user_input"],
)
# Conversation buffer memory, seeded with one example exchange so the model
# sees a non-empty history on the first real turn.
memory = ConversationBufferMemory()
memory.save_context(
    {"input": "I need some help"},
    {"output": "ok! how can i help you."},
)
# Chain wiring the prompt, the endpoint, and the conversation memory together.
llm_chain = LLMChain(
    prompt=prompt,
    llm=llm,
    memory=memory,
)
def generate_response(user_input):
    """Run one chat turn through the LLM chain.

    Args:
        user_input: The user's latest message.

    Returns:
        The chain's output dict (the UI reads its 'text' and 'history' keys).
    """
    # NOTE(review): the chain already has `memory` attached, which would fill
    # {history} itself; passing the raw message list here overrides that
    # formatting — confirm this is intentional.
    response = llm_chain.invoke(
        {"history": memory.chat_memory.messages, "user_input": user_input}
    )
    return response
# --- Streamlit UI ---
st.title("AI Chatbot")
st.write("Welcome to the AI Chatbot! Ask anything you like.")

# Single text box; conversation state is carried by the chain's memory.
user_input = st.text_input("You:", key="input")

# Only call the model when the button is pressed AND the box is non-empty.
if st.button("Send") and user_input:
    response = generate_response(user_input)
    response_text = response['text']
    # Show the model's reply and the running history below it.
    st.text_area("ChatBot:", response_text, height=100)
    st.write('History:')
    st.write(response['history'])