# Hugging Face Spaces demo app (Space status at time of capture: Sleeping)
import torch  # was missing: torch is referenced below (dtype / CUDA check)
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline


@st.cache_resource
def _load_pipeline() -> TextGenerationPipeline:
    """Load the Astrid-1B model/tokenizer once and cache across Streamlit reruns.

    Without caching, every widget interaction re-downloads/reloads the model,
    which makes the app unusably slow.
    """
    # Load the model and tokenizer in full precision.
    model = AutoModelForCausalLM.from_pretrained("PAIXAI/Astrid-1B").to(dtype=torch.float32)
    tokenizer = AutoTokenizer.from_pretrained("PAIXAI/Astrid-1B")
    # device=0 puts the pipeline on the first GPU when available, -1 means CPU.
    return TextGenerationPipeline(
        model=model,
        tokenizer=tokenizer,
        device=0 if torch.cuda.is_available() else -1,
    )


generate_text = _load_pipeline()

# Streamlit UI
st.title("Astrid-1B Chatbot")
st.write("Test the Astrid-1B chatbot from Hugging Face!")

user_input = st.text_input("Enter your question:")
if user_input:
    # NOTE(review): with do_sample=False (greedy decoding), temperature and
    # renormalize_logits have no effect; kept to preserve the original call.
    response = generate_text(
        user_input,
        min_new_tokens=2,
        max_new_tokens=256,
        do_sample=False,
        num_beams=1,
        temperature=0.3,
        repetition_penalty=1.2,
        renormalize_logits=True,
    )
    # Pipeline returns a list of dicts; the text lives under "generated_text".
    st.write("Response:", response[0]["generated_text"])

st.write("Note: This is a simple UI for demonstration purposes. Ensure you have the required libraries installed and adjust the model parameters as needed.")