suriya7 committed on
Commit
e77f60d
β€’
1 Parent(s): 10a9d3b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -8
app.py CHANGED
@@ -10,7 +10,7 @@ import base64
10
  # Load environment variables
11
  load_dotenv()
12
 
13
- icons = {"assistant": "./chat-assistant.jpeg", "user": "./hum.jpeg"}
14
 
15
  # Configure the Llama index settings
16
  Settings.llm = HuggingFaceInferenceAPI(
@@ -107,16 +107,10 @@ if user_prompt and uploaded_file:
107
  st.session_state.messages.append({'role': 'user', "content": user_prompt})
108
  with st.chat_message("user", avatar="./hum.jpeg"):
109
  st.write(user_prompt)
110
- # response = handle_query(user_prompt)
111
- # st.session_state.messages.append({'role': 'assistant', "content": response})
112
-
113
- # for message in st.session_state.messages:
114
- # with st.chat_message(message['role']):
115
- # st.write(message['content'])
116
 
117
  # Generate a new response if last message is not from assistant
118
  if st.session_state.messages[-1]["role"] != "assistant":
119
- with st.chat_message("assistant",avatar="./chat-assistant.jpeg"):
120
  response = handle_query(user_prompt)
121
  full_response = st.write_stream(response)
122
  message = {"role": "assistant", "content": full_response}
 
10
  # Load environment variables
11
  load_dotenv()
12
 
13
+ icons = {"assistant": "πŸ‘½", "user": "./hum.jpeg"}
14
 
15
  # Configure the Llama index settings
16
  Settings.llm = HuggingFaceInferenceAPI(
 
107
  st.session_state.messages.append({'role': 'user', "content": user_prompt})
108
  with st.chat_message("user", avatar="./hum.jpeg"):
109
  st.write(user_prompt)
 
 
 
 
 
 
110
 
111
  # Generate a new response if last message is not from assistant
112
  if st.session_state.messages[-1]["role"] != "assistant":
113
+ with st.chat_message("assistant",avatar="πŸ‘½"):
114
  response = handle_query(user_prompt)
115
  full_response = st.write_stream(response)
116
  message = {"role": "assistant", "content": full_response}