import os

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# Retrieve the Hugging Face token from environment variables
hf_token = os.environ.get("HF_TOKEN")
if not hf_token:
    st.error("Hugging Face token not found. Please add your HF_TOKEN to the Space secrets.")
    st.stop()
# Load models and tokenizers
@st.cache_resource
def load_model_and_tokenizer(model_name):
    # Pass the token so gated checkpoints (e.g. the Llama 3.1 models) can be downloaded
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
    return model, tokenizer
model_8b, tokenizer_8b = load_model_and_tokenizer("meta-llama/Meta-Llama-3.1-8B")
model_8b_instruct, tokenizer_8b_instruct = load_model_and_tokenizer("meta-llama/Meta-Llama-3.1-8B-Instruct")
def generate_text(model, tokenizer, prompt, max_length=100):
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(**inputs, max_length=max_length, num_return_sequences=1)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
st.title("LLaMA-3.1-8B vs LLaMA-3.1-8B-Instruct Comparison")
prompt = st.text_area("Enter your prompt:", height=100)
max_length = st.slider("Max output length:", min_value=50, max_value=500, value=100)
if st.button("Generate"):
    if prompt:
        col1, col2 = st.columns(2)

        with col1:
            st.subheader("LLaMA-3.1-8B Output")
            output_8b = generate_text(model_8b, tokenizer_8b, prompt, max_length)
            st.write(output_8b)

        with col2:
            st.subheader("LLaMA-3.1-8B-Instruct Output")
            output_8b_instruct = generate_text(model_8b_instruct, tokenizer_8b_instruct, prompt, max_length)
            st.write(output_8b_instruct)
    else:
        st.warning("Please enter a prompt.")