# NOTE(review): the following Hugging Face Spaces page banner ("Spaces: Sleeping")
# was captured when this file was copied from the Spaces web UI; it is kept here
# as a comment so the file remains valid Python.
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Checkpoint to serve; any GPT-2 variant name works (e.g. "gpt2-medium").
model_name = "gpt2"


@st.cache_resource
def _load_model_and_tokenizer(name):
    """Load and cache the GPT-2 model and tokenizer.

    Streamlit reruns the whole script on every user interaction; without
    st.cache_resource the multi-hundred-MB weights would be reloaded on
    each keystroke/submit. The cache keeps one copy per checkpoint name.
    """
    return (
        AutoModelForCausalLM.from_pretrained(name),
        AutoTokenizer.from_pretrained(name),
    )


# Keep the original module-level names so the rest of the script is unchanged.
model, tokenizer = _load_model_and_tokenizer(model_name)
# Function to generate a response from GPT-2 | |
def generate_response(prompt):
    """Generate a GPT-2 completion for *prompt* and return only the new text.

    Args:
        prompt: Full text prompt (role instruction + user question).

    Returns:
        The generated continuation as a string, without echoing the prompt
        back (the previous version displayed the instruction prefix to the
        user because model.generate returns prompt + continuation).
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    prompt_len = inputs["input_ids"].shape[-1]
    output = model.generate(
        inputs["input_ids"],
        # GPT-2 has no pad token: pass the attention mask and reuse EOS as
        # pad explicitly, otherwise generate() warns and padding is ambiguous.
        attention_mask=inputs["attention_mask"],
        pad_token_id=tokenizer.eos_token_id,
        # max_length counted the prompt tokens too, so long questions left
        # almost no budget for the answer; bound the *new* tokens instead.
        max_new_tokens=150,
    )
    # Slice off the prompt tokens so only the model's answer is returned.
    return tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True)
# ---------------------------------------------------------------------------
# Streamlit UI: title, role description, question input, and response display.
# ---------------------------------------------------------------------------
st.title("GPT-2 Data Structures Mentor")

# Tell the user what the chatbot's role is and what it can help with.
st.write("This chatbot is your mentor to help you with learning Data Structures. Ask questions about arrays, linked lists, stacks, queues, trees, graphs, and other related topics!")

# Free-form question input; empty string default means the block below is
# skipped until the user actually types something.
user_input = st.text_input("You:", "")

if user_input:
    # Prepend the mentor instruction so base GPT-2 (which is not
    # instruction-tuned) is steered toward an on-topic answer.
    prompt = f"You are a mentor teaching data structures. Answer the following question: {user_input}"
    response = generate_response(prompt)
    # Read-only text area so the response cannot be edited in place.
    st.text_area("Mentor's Response:", value=response, height=200, disabled=True)