Update app.py
app.py
CHANGED
@@ -2,23 +2,42 @@ import streamlit as st
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
-# Load
-
-
-
+# Load the Mistral-7B-Instruct model
+tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
+model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1", torch_dtype=torch.float16, device_map="auto")
+
+# Initialize conversation history if not present
+if "conversation" not in st.session_state:
+    st.session_state.conversation = []
 
 def get_response(user_input):
-
+    """Generate a thoughtful response that includes a follow-up question."""
+    history = "\n".join(st.session_state.conversation[-5:])  # Keep only the last 5 turns
+    prompt = (
+        f"You are a knowledgeable study coach. Engage the student in conversation. "
+        f"Ask open-ended questions to deepen understanding. Provide feedback and encourage explanations.\n\n"
+        f"Previous conversation:\n{history}\n\n"
+        f"Student: {user_input}\n"
+        f"Coach: "
+    )
+    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
     with torch.no_grad():
-        output = model.generate(input_ids, max_length=
+        output = model.generate(input_ids, max_length=300)
     response = tokenizer.decode(output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
     return response
 
 # Streamlit UI
-st.title("Study Buddy Chatbot")
-st.write("Ask a question or type a topic, and I'll help you learn!")
+st.title("📚 Study Buddy Chatbot")
+st.write("Ask a question or type a topic, and I'll help you learn interactively!")
 
 user_input = st.text_input("Type your question or topic:")
 if user_input:
     response = get_response(user_input)
-    st.
+    st.session_state.conversation.append(f"Student: {user_input}")
+    st.session_state.conversation.append(f"Coach: {response}")
+    st.write("🤖 Coach:", response)
+
+# Display conversation history
+st.subheader("Conversation History")
+for chat in st.session_state.conversation[-10:]:
+    st.write(chat)
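
Two notes on the new version, with sketches rather than drop-in replacements.

First, max_length=300 in model.generate counts the prompt tokens as well as the reply, so as the stored history grows the prompt alone can exceed 300 tokens and leave no room for the model to answer. A sketch of a generation helper that caps only the newly generated tokens, assuming the same tokenizer and model objects loaded above (the helper name generate_reply is just for illustration):

    import torch

    def generate_reply(prompt, tokenizer, model, max_new_tokens=200):
        # Tokenize the prompt and move the tensors to the model's device.
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        with torch.no_grad():
            output = model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,  # caps only the reply, not prompt + reply
                pad_token_id=tokenizer.eos_token_id,  # Mistral's tokenizer defines no pad token
            )
        # Decode only the tokens generated after the prompt.
        return tokenizer.decode(output[0, inputs["input_ids"].shape[-1]:], skip_special_tokens=True)

Second, Streamlit reruns the entire script on every interaction, so the two from_pretrained calls at the top reload the 7B model on each message. Wrapping the load in st.cache_resource is the usual fix; a minimal sketch under the same model name as the diff:

    import streamlit as st
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    @st.cache_resource  # run once per process, reuse across Streamlit reruns
    def load_model():
        tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
        model = AutoModelForCausalLM.from_pretrained(
            "mistralai/Mistral-7B-Instruct-v0.1",
            torch_dtype=torch.float16,
            device_map="auto",
        )
        return tokenizer, model

    tokenizer, model = load_model()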
|