Grandediw committed
Commit c757a2d · 1 Parent(s): adeb071
Files changed (2)
  1. app.py +3 -7
  2. requirements.txt +3 -0
app.py CHANGED
@@ -1,5 +1,4 @@
 import streamlit as st
-pip install transformers
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
 st.set_page_config(page_title="Hugging Face Chatbot", layout="centered")
@@ -27,7 +26,7 @@ chat_pipeline = load_model()
 if "messages" not in st.session_state:
     st.session_state.messages = []
 
-# Display chat messages from history on app rerun
+# Display chat messages from history
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
@@ -40,12 +39,9 @@ if prompt := st.chat_input("Ask me anything:"):
 
     # Generate response
     with st.spinner("Thinking..."):
-        # Using the pipeline to generate a response
         response = chat_pipeline(prompt)[0]["generated_text"]
-
-        # The model may return the prompt + response concatenated, so you might need
-        # to extract only the response part. This depends on how the model is trained.
-        # Here we assume the model returns the full text and we just remove the original prompt from it:
+        # The model might return the prompt + response together.
+        # If that's the case, remove the prompt from the start.
         if response.startswith(prompt):
             response = response[len(prompt):].strip()
 
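The hunk headers reference a load_model() helper that this commit does not touch, so its body is not shown in the diff. A minimal sketch of what such a cached loader typically looks like, assuming @st.cache_resource and a placeholder "gpt2" checkpoint (the Space's actual model name is not visible here):

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

@st.cache_resource  # load the model once per process instead of on every rerun
def load_model():
    # "gpt2" is a stand-in; the real checkpoint is not shown in this diff
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

chat_pipeline = load_model()

Caching matters here because Streamlit re-executes app.py on every user interaction; without it, the model would be reloaded on each chat message.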
requirements.txt ADDED
@@ -0,0 +1,3 @@
+streamlit
+transformers
+torch
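With requirements.txt checked in, the host (a Hugging Face Space, for instance) installs these packages before app.py runs, which is why the stray pip install transformers line could be deleted from the source: streamlit drives the UI, transformers provides the pipeline API, and torch is the backend transformers uses to run the model. To reproduce the environment locally:

pip install -r requirements.txt
streamlit run app.py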