Grandediw committed
Commit decbb4f
1 Parent(s): 53dbe56
Files changed (1)
  1. app.py +36 -8
app.py CHANGED
@@ -1,6 +1,26 @@
 import streamlit as st
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
-st.title("Echo Bot")
+st.set_page_config(page_title="Hugging Face Chatbot", layout="centered")
+st.title("Hugging Face Chatbot")
+
+@st.cache_resource
+def load_model():
+    # Load tokenizer and model from Hugging Face
+    tokenizer = AutoTokenizer.from_pretrained("Grandediw/lora_model_finetuned", use_fast=True)
+    model = AutoModelForCausalLM.from_pretrained("Grandediw/lora_model_finetuned", device_map="auto", trust_remote_code=True)
+    chat_pipeline = pipeline(
+        "text-generation",
+        model=model,
+        tokenizer=tokenizer,
+        max_length=512,
+        temperature=0.7,
+        top_p=0.9,
+        pad_token_id=tokenizer.eos_token_id
+    )
+    return chat_pipeline
+
+chat_pipeline = load_model()
 
 # Initialize chat history
 if "messages" not in st.session_state:
@@ -11,16 +31,24 @@ for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
-# React to user input
-if prompt := st.chat_input("What is up?"):
-    # Display user message in chat message container
+# User input
+if prompt := st.chat_input("Ask me anything:"):
+    # Display user message and store it
     st.chat_message("user").markdown(prompt)
-    # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-    response = f"Echo: {prompt}"
-    # Display assistant response in chat message container
+    # Generate response
+    with st.spinner("Thinking..."):
+        # Using the pipeline to generate a response
+        response = chat_pipeline(prompt)[0]["generated_text"]
+
+        # The model may return the prompt + response concatenated, so you might need
+        # to extract only the response part. This depends on how the model is trained.
+        # Here we assume the model returns the full text and we just remove the original prompt from it:
+        if response.startswith(prompt):
+            response = response[len(prompt):].strip()
+
+    # Display and store assistant response
     with st.chat_message("assistant"):
         st.markdown(response)
-    # Add assistant response to chat history
     st.session_state.messages.append({"role": "assistant", "content": response})
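
Note on the generation step (not part of the commit): the startswith()/slicing block exists because a text-generation pipeline returns the prompt concatenated with the continuation by default. A minimal standalone sketch of the same call is shown below, assuming the Grandediw/lora_model_finetuned checkpoint loads the way load_model() does and that transformers plus accelerate (backing device_map="auto") are installed; the example prompt string stands in for whatever st.chat_input() returns, and return_full_text=False is the pipeline option that returns only the newly generated text, which would make the manual prompt stripping unnecessary.

# Standalone sketch, not part of the commit: same model and generation settings as
# load_model() in app.py, but return_full_text=False makes the pipeline return
# only the newly generated continuation, so no manual prompt stripping is needed.
from transformers import pipeline

chat_pipeline = pipeline(
    "text-generation",
    model="Grandediw/lora_model_finetuned",
    trust_remote_code=True,
    device_map="auto",        # requires the accelerate package
)

result = chat_pipeline(
    "What can you do?",       # illustrative prompt, stands in for st.chat_input()
    max_length=512,
    temperature=0.7,
    top_p=0.9,
    pad_token_id=chat_pipeline.tokenizer.eos_token_id,
    return_full_text=False,
)
print(result[0]["generated_text"])  # continuation only, prompt already removed

Locally, the updated app itself would typically be started with streamlit run app.py; inside the Space, the Streamlit runtime launches it automatically.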