import streamlit as st
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

# Load the model and tokenizer
model_name = "acorreal/phi3-project-management"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Move the model to the available device and set evaluation mode
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()

# Streamlit app
st.title('Project Management Educational Tutor')
st.write('This app uses the "acorreal/phi3-project-management" model to provide insights on project management topics.')

user_input = st.text_area("Enter your project management question or topic here:")

if st.button('Get Response'):
    if user_input:
        # Tokenize the input and move the tensors to the correct device.
        # Token IDs and the attention mask must remain integer tensors, so no
        # dtype cast is applied here; the model handles its own precision.
        inputs = tokenizer(user_input, return_tensors="pt").to(device)

        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits
        predicted_class_id = logits.argmax().item()
        st.write(f"Predicted class ID: {predicted_class_id}")
    else:
        st.write("Please enter a question or topic to get a response.")