File size: 1,313 Bytes
7c6b19a
80e7abb
7c6b19a
38afc93
 
 
80e7abb
 
38afc93
d19bd0a
50d6a8f
38afc93
 
 
d19bd0a
 
 
38afc93
 
 
 
d19bd0a
38afc93
 
d19bd0a
38afc93
50d6a8f
38afc93
 
 
7c6b19a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load GPT-2 model and tokenizer.
# Streamlit re-executes the whole script on every user interaction, so the
# load MUST be cached — otherwise the model is rebuilt on each keystroke/rerun.
@st.cache_resource
def _load_gpt2(name: str):
    """Load and cache the causal LM and its tokenizer for *name*.

    Cached process-wide by st.cache_resource, so the (expensive) weights
    load happens exactly once per server process.
    """
    lm = AutoModelForCausalLM.from_pretrained(name)
    tok = AutoTokenizer.from_pretrained(name)
    return lm, tok


model_name = "gpt2"  # You can replace with a different version of GPT-2 if needed
model, tokenizer = _load_gpt2(model_name)

# Function to generate a response from GPT-2
def generate_response(prompt: str) -> str:
    """Generate a GPT-2 completion for *prompt* and return only the new text.

    Fixes over the naive version:
    - passes ``attention_mask`` so ``generate`` does not warn/misbehave;
    - sets ``pad_token_id`` to EOS (GPT-2 defines no pad token);
    - uses ``max_new_tokens`` instead of ``max_length`` so long prompts
      still get a full-length answer (``max_length`` counts prompt tokens);
    - decodes only the generated tokens, so the hidden instruction prompt
      is not echoed back to the user.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    output = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=150,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Slice off the prompt tokens: everything after the input length is new.
    new_tokens = output[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

# ---- Streamlit UI ----
st.title("GPT-2 Data Structures Mentor")

# Describe the chatbot's role to the user.
st.write("This chatbot is your mentor to help you with learning Data Structures. Ask questions about arrays, linked lists, stacks, queues, trees, graphs, and other related topics!")

# Free-text question from the user.
user_input = st.text_input("You:", "")

if user_input:
    # Prepend the mentor instruction so GPT-2 answers in the desired role.
    # NOTE: this wrapper is added on every submission (Streamlit reruns the
    # script each interaction), not just the first one.
    prompt = (
        "You are a mentor teaching data structures. "
        f"Answer the following question: {user_input}"
    )
    answer = generate_response(prompt)
    # Read-only output area for the model's reply.
    st.text_area("Mentor's Response:", value=answer, height=200, disabled=True)