Keira James committed
Commit 38afc93 · 1 Parent(s): 0d42a18

updating again

Files changed (2)
  1. app.py +18 -47
  2. requirements.txt +0 -1
app.py CHANGED
@@ -1,59 +1,30 @@
  import streamlit as st
  from transformers import AutoTokenizer, AutoModelForCausalLM
- import torch
 
- # Define the model name
- model_name = "Qwen/Qwen2.5-1.5B-Instruct"
-
- # Load the model and tokenizer
- model = AutoModelForCausalLM.from_pretrained(
-     model_name,
-     torch_dtype="auto",
-     device_map="auto"
- )
+ # Load GPT-2 model and tokenizer
+ model_name = "gpt2"  # You can replace with a different version of GPT-2 if needed
+ model = AutoModelForCausalLM.from_pretrained(model_name)
  tokenizer = AutoTokenizer.from_pretrained(model_name)
 
- # Function to generate a response
+ # Function to generate a response from GPT-2
  def generate_response(prompt):
-     if not prompt:
-         return "Please enter a prompt."
-
-     # Create the messages for chat-based model
-     messages = [
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": prompt}
-     ]
-
-     # Format the input for the model
-     text = tokenizer.apply_chat_template(
-         messages,
-         tokenize=False,
-         add_generation_prompt=True
-     )
-     model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
-
-     # Generate model response
-     generated_ids = model.generate(
-         **model_inputs,
-         max_new_tokens=512
-     )
-
-     # Decode and return the response
-     generated_ids = [
-         output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
-     ]
-
-     response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-
+     inputs = tokenizer(prompt, return_tensors="pt")
+     output = model.generate(inputs['input_ids'], max_length=150)
+     response = tokenizer.decode(output[0], skip_special_tokens=True)
      return response
 
  # Streamlit UI
- st.title("AI Text Generator")
-
- prompt = st.text_area("Enter your prompt:", placeholder="Type your question or prompt here...")
-
- if st.button("Generate Response"):
-     with st.spinner("Generating response..."):
-         response = generate_response(prompt)
-         st.text_area("Model Response:", value=response, height=200, disabled=True)
+ st.title("GPT-2 Data Structures Mentor")
+
+ # Instruction for the chatbot role
+ st.write("This chatbot is your mentor to help you with learning Data Structures. Ask questions about arrays, linked lists, stacks, queues, trees, graphs, and other related topics!")
+
+ # Text input for the user prompt
+ user_input = st.text_input("You:", "")
+
+ if user_input:
+     # Adding context to the prompt, making GPT-2 respond like a mentor
+     prompt = f"You are a mentor teaching data structures. Answer the following question: {user_input}"
+     response = generate_response(prompt)
+     st.text_area("Mentor's Response:", value=response, height=200, disabled=True)
 
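A behavioral note on the rewritten generate_response: the removed Qwen version sliced the prompt tokens off before decoding (output_ids[len(input_ids):]), while the new GPT-2 version decodes output[0] whole, so the mentor prompt is echoed back in the response box, and max_length=150 counts the prompt tokens against the total length budget. A minimal sketch of a variant that keeps the old trimming behavior, assuming the model and tokenizer globals from app.py above (a hypothetical alternative, not part of this commit):

def generate_response(prompt):
    # Tokenize the prompt; keep the attention mask to pass to generate()
    inputs = tokenizer(prompt, return_tensors="pt")
    output = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=150,                   # budget counts new tokens only
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 defines no pad token
    )
    # Drop the echoed prompt tokens, as the removed Qwen code did
    new_tokens = output[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)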
requirements.txt CHANGED
@@ -1,4 +1,3 @@
  streamlit
  transformers==4.31.0
  torch
- accelerate>=0.26.0
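
Dropping accelerate>=0.26.0 is consistent with the app.py change: transformers requires accelerate only for features such as loading with device_map="auto", which the removed Qwen loader used and the plain GPT-2 loader does not.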