import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from huggingface_hub import login

# Llama 3 is a gated model: replace 'your_token_here' with your actual Hugging Face token
login(token="your_token_here")

# Load the model and tokenizer once; st.cache_resource keeps them in memory across Streamlit reruns
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"

@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)  # bfloat16 halves memory vs. float32
    return tokenizer, model

tokenizer, model = load_model()
# Set up the Streamlit app
st.title("Chat with LLaMA")
st.write("Enter your message below:")
# Create a text input for user message
user_input = st.text_input("Your Message:")
# Initialize a session state to keep track of the conversation
if 'conversation' not in st.session_state:
    st.session_state.conversation = []
if st.button("Send"):
if user_input:
# Append user input to the conversation history
st.session_state.conversation.append({"role": "user", "content": user_input})
# Create input for the model
conversation_history = "\n".join(
[f"{msg['role']}: {msg['content']}" for msg in st.session_state.conversation]
)
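
        # Note: Meta-Llama-3-8B-Instruct also ships a chat template, so a sketch of an
        # alternative (not used here) would tokenize the role/content dicts directly:
        #   input_ids = tokenizer.apply_chat_template(
        #       st.session_state.conversation, add_generation_prompt=True, return_tensors="pt"
        #   )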

        # Tokenize the prompt
        input_ids = tokenizer(conversation_history, return_tensors="pt").input_ids

        # Generate a response (Llama 3 has no pad token, so reuse EOS to silence the warning)
        with torch.no_grad():
            output = model.generate(input_ids, max_new_tokens=100, do_sample=True, pad_token_id=tokenizer.eos_token_id)

        # Decode only the newly generated tokens, not the echoed prompt
        response = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)

        # Append the model response to the conversation
        st.session_state.conversation.append({"role": "assistant", "content": response})

# Display the conversation
for msg in st.session_state.conversation:
    if msg['role'] == 'user':
        st.write(f"**User:** {msg['content']}")
    else:
        st.write(f"**Assistant:** {msg['content']}")

# Optional: Clear the conversation
if st.button("Clear Conversation"):
    st.session_state.conversation = []
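
# Run the app locally with: streamlit run app.py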