sravyaa02 committed on
Commit b3391d7 · verified · 1 Parent(s): 6aafded

Upload 10 files

Files changed (10):
  1. .gitattributes +35 -0
  2. README.md +14 -0
  3. app.py +21 -0
  4. bash_profile.txt +1 -0
  5. chatbot.py +164 -0
  6. homepage.py +63 -0
  7. output.wav +0 -0
  8. predict_stress.py +114 -0
  9. requirements.txt +20 -0
  10. session_state.py +22 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Psychologist Chatbot
+ emoji: 📈
+ colorFrom: yellow
+ colorTo: indigo
+ sdk: streamlit
+ sdk_version: 1.39.0
+ app_file: app.py
+ pinned: false
+ license: mit
+ short_description: Interactive chatbot as a psychologist
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,21 @@
+ import streamlit as st
+ from homepage import display_homepage
+ from chatbot import display_chatbot
+ from predict_stress import display_predict_stress
+ from session_state import initialize_session_state
+
+ initialize_session_state()
+
+ # Main logic to switch between pages
+ if 'page' not in st.session_state:
+     st.session_state.page = "home"
+
+ if st.session_state.page == "home":
+     display_homepage()
+ elif st.session_state.page == "chat":
+     display_chatbot()
+ elif st.session_state.page == "stress":
+     display_predict_stress()
+
+ # Adjust the layout to reduce white space
+ st.markdown("<style>div.stContainer {padding-top: 0;}</style>", unsafe_allow_html=True)
bash_profile.txt ADDED
@@ -0,0 +1 @@
+ export HF_TOKEN=$(cat ~/.secret/hugging-face.txt)
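For context, this exported variable is how the app authenticates: chatbot.py reads it with `os.getenv("HF_TOKEN")`. Below is a minimal, illustrative sketch of that hand-off; it assumes the shell has sourced this file or that the token is configured as a Space secret, and it is not part of the uploaded files.

```python
# Illustrative only: confirm that HF_TOKEN (exported in bash_profile.txt or set
# as a Space secret) is visible to the Python process, since chatbot.py relies
# on os.getenv("HF_TOKEN") for authentication.
import os

hf_token = os.getenv("HF_TOKEN")
if hf_token is None:
    raise RuntimeError("HF_TOKEN is not set; source bash_profile.txt or add a Space secret.")
print(f"HF_TOKEN loaded ({len(hf_token)} characters).")
```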
chatbot.py ADDED
@@ -0,0 +1,164 @@
+ import streamlit as st
+ from langchain_core.prompts import PromptTemplate
+ from langchain_core.output_parsers import StrOutputParser
+ from transformers import pipeline
+ from langchain_huggingface import HuggingFaceEndpoint
+ import numpy as np
+ from pydub import AudioSegment
+ import os
+
+ # Model IDs
+ model_id = "unsloth/Llama-3.2-1B-Instruct"
+ model2_id = "mistralai/Mistral-7B-Instruct-v0.3"
+ whisper_model = "openai/whisper-small"  # Whisper model used for audio transcription
+
+ def get_llm_hf_inference(model_id, max_new_tokens=128, temperature=0.1):
+     """Returns a language model for HuggingFace inference."""
+     try:
+         llm = HuggingFaceEndpoint(
+             repo_id=model_id,
+             max_new_tokens=max_new_tokens,
+             temperature=temperature,
+             token=os.getenv("HF_TOKEN")
+         )
+         return llm
+     except Exception as e:
+         st.error(f"Error initializing model: {e}")
+         return None
+
+ # Initialize Whisper transcription model
+ def load_transcription_model():
+     try:
+         transcriber = pipeline("automatic-speech-recognition", model=whisper_model)
+         return transcriber
+     except Exception as e:
+         st.error(f"Error loading Whisper model: {e}")
+         return None
+
+ # Preprocess audio to 16 kHz mono
+ def preprocess_audio(file):
+     audio = AudioSegment.from_file(file).set_frame_rate(16000).set_channels(1)
+     audio_samples = np.array(audio.get_array_of_samples()).astype(np.float32) / (2**15)
+     return audio_samples
+
+ # Transcribe audio after preprocessing
+ def transcribe_audio(file, transcriber):
+     audio = preprocess_audio(file)
+     transcription = transcriber(audio)["text"]
+     return transcription
+
+ # Chatbot page content
+ def display_chatbot():
+     st.title("Personal Psychologist Chatbot")
+     st.markdown(f"*This is a simple chatbot that acts as a psychologist and gives solutions to your psychological problems. It uses the {model_id} model.*")
+
+     # Sidebar for settings
+     with st.sidebar:
+         # Reset chat history
+         reset_history = st.button("Reset Chat History")
+         go_home = st.button("Back to Home")
+         if go_home:
+             st.session_state.page = "home"
+             st.rerun()  # Go back to the homepage
+
+     # Initialize or reset chat history
+     if reset_history:
+         st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message}]
+
+     def get_response(system_message, chat_history, user_text, model_id, max_new_tokens=256):
+         """Generates a response from the chatbot model."""
+         hf = get_llm_hf_inference(model_id=model_id, max_new_tokens=max_new_tokens)
+         if hf is None:
+             return "Error: Model not initialized.", chat_history
+
+         # Create the prompt template
+         prompt = PromptTemplate.from_template(
+             (
+                 "[INST] {system_message}"
+                 "\nCurrent Conversation:\n{chat_history}\n\n"
+                 "\nUser: {user_text}.\n [/INST]"
+                 "\nAI:"
+             )
+         )
+         chat = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
+
+         # Generate the response
+         response = chat.invoke(input=dict(system_message=system_message, user_text=user_text, chat_history=chat_history))
+         response = response.split("AI:")[-1].strip()
+
+         # Check for "thank you" in the user's input and offer a report
+         if "thank you" in user_text.lower():
+             follow_up_question = "Would you like to have a report of your current health?"
+             response += f"\n\n{follow_up_question}"
+
+         # Update the chat history
+         chat_history.append({'role': 'user', 'content': user_text})
+         chat_history.append({'role': 'assistant', 'content': response})
+         return response, chat_history
+
+     def get_summary_of_chat_history(chat_history, model2_id):
+         """Generates a summary of the entire chat history using the second model (Mistral-7B-Instruct)."""
+         hf = get_llm_hf_inference(model_id=model2_id, max_new_tokens=256)
+         if hf is None:
+             return "Error: Model not initialized."
+
+         # Create the summary prompt
+         chat_content = "\n".join([f"{message['role']}: {message['content']}" for message in chat_history])
+         prompt = PromptTemplate.from_template(
+             f"Please summarize the following conversation and generate a report of the user's current health:\n\n{chat_content}"
+         )
+
+         summary = prompt | hf.bind(skip_prompt=True) | StrOutputParser(output_key='content')
+         summary_response = summary.invoke(input={})
+
+         return summary_response
+
+     # Load Whisper model for transcription
+     transcriber = load_transcription_model()
+
+     # User input for audio and text
+     st.markdown("### Choose your input:")
+     audio_file = st.file_uploader("Upload an audio file for transcription", type=["mp3", "wav", "m4a"])
+     st.session_state.user_text = st.chat_input(placeholder="Or enter your text here.")
+
+     # Check if an audio file is uploaded and transcribe it if available
+     if audio_file is not None and transcriber:
+         with st.spinner("Transcribing audio..."):
+             try:
+                 st.session_state.user_text = transcribe_audio(audio_file, transcriber)
+                 st.success("Audio transcribed successfully!")
+             except Exception as e:
+                 st.error(f"Error transcribing audio: {e}")
+
+     # Chat interface
+     output_container = st.container()
+
+     # Display chat messages
+     with output_container:
+         for message in st.session_state.chat_history:
+             if message['role'] == 'system':
+                 continue
+             with st.chat_message(message['role'], avatar=st.session_state['avatars'][message['role']]):
+                 st.markdown(message['content'])
+
+     # Process text input for chatbot response
+     if st.session_state.user_text:
+         with st.chat_message("user", avatar=st.session_state.avatars['user']):
+             st.markdown(st.session_state.user_text)
+
+         with st.chat_message("assistant", avatar=st.session_state.avatars['assistant']):
+             with st.spinner("Addressing your concerns..."):
+                 response, st.session_state.chat_history = get_response(
+                     system_message=st.session_state.system_message,
+                     user_text=st.session_state.user_text,
+                     chat_history=st.session_state.chat_history,
+                     model_id=model_id,
+                     max_new_tokens=st.session_state.max_response_length,
+                 )
+                 st.markdown(response)
+
+         # Check if the user has agreed to the report
+         if "yes" in st.session_state.user_text.lower() and "Would you like to have a report of your current health?" in response:
+             with st.spinner("Generating your health report..."):
+                 report = get_summary_of_chat_history(st.session_state.chat_history, model2_id)
+                 st.markdown(report)
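For readers unfamiliar with the chain syntax in `get_response`, here is a minimal standalone sketch of the same prompt → endpoint → parser composition, stripped of the Streamlit state handling. It assumes `HF_TOKEN` is set and the hosted inference endpoint for the model is reachable; the example question is made up, and it passes the token through the documented `huggingfacehub_api_token` field rather than the `token=` keyword used above.

```python
# Minimal sketch of the LCEL chain pattern used in chatbot.py:
# PromptTemplate piped into a HuggingFaceEndpoint LLM, then into StrOutputParser.
import os
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    repo_id="unsloth/Llama-3.2-1B-Instruct",          # same model_id as chatbot.py
    max_new_tokens=128,
    temperature=0.1,
    huggingfacehub_api_token=os.getenv("HF_TOKEN"),   # explicit token field
)

prompt = PromptTemplate.from_template(
    "[INST] {system_message}\nUser: {user_text} [/INST]\nAI:"
)

chain = prompt | llm | StrOutputParser()

reply = chain.invoke({
    "system_message": "You are a supportive, psychologist-style assistant.",
    "user_text": "I have trouble sleeping before exams.",  # made-up example input
})
print(reply.split("AI:")[-1].strip())  # same post-processing as get_response()
```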
homepage.py ADDED
@@ -0,0 +1,63 @@
+ import streamlit as st
+
+ # Homepage content
+ def display_homepage():
+     st.markdown(
+         """
+         <div style="text-align: center; margin-top: 50px; font-size: 60px; font-weight: bold; color: #2c3e50; margin-left: auto; margin-right: auto;">
+         Your personal AI Therapist
+         </div>
+         """, unsafe_allow_html=True
+     )
+
+     st.markdown(
+         """
+         <div style="text-align: center; font-size: 20px; color: #000000; margin-top: 20px; max-width: 700px; margin-left: auto; margin-right: auto;">
+         This is your healthcare chatbot that streamlines outpatient care, solves routine queries 24/7, and effortlessly automates appointment bookings, prescriptions, and reports. Let AI help you with your mental health journey.
+         </div>
+         """, unsafe_allow_html=True
+     )
+
+     st.markdown(
+         """
+         <style>
+         .stApp {
+             background-image: url('https://images.pexels.com/photos/2680270/pexels-photo-2680270.jpeg?auto=compress&cs=tinysrgb&w=1260&h=750&dpr=1');
+             background-size: cover;
+             background-position: center;
+             background-repeat: no-repeat;
+             height: 100vh;
+             color: white;
+         }
+
+         .stButton>button {
+             background-color: #2980b9;
+             color: white;
+             font-size: 18px;
+             font-weight: bold;
+             padding: 15px 30px;
+             border-radius: 8px;
+             border: none;
+             box-shadow: 0px 5px 15px rgba(0, 0, 0, 0.1);
+             cursor: pointer;
+             transition: background-color 0.3s ease;
+         }
+
+         .stButton>button:hover {
+             background-color: #3498db;
+         }
+         </style>
+         """, unsafe_allow_html=True
+     )
+
+     col1, col2 = st.columns([1, 1])
+
+     with col1:
+         if st.button("Start Chat", key="start_chat_button"):
+             st.session_state.page = "chat"
+             st.rerun()  # Trigger page change
+
+     with col2:
+         if st.button("Predict Stress", key="predict_stress_button"):
+             st.session_state.page = "stress"
+             st.rerun()  # Trigger page change
output.wav ADDED
Binary file (106 kB).
 
predict_stress.py ADDED
@@ -0,0 +1,114 @@
+ import streamlit as st
+ import xgboost as xgb
+ import pandas as pd
+ from huggingface_hub import hf_hub_download
+
+ xgboostmodel_id = "Sannidhi/stress_prediction_xgboost_model"
+ xgboost_model = None
+
+ def load_xgboost_model():
+     global xgboost_model
+     try:
+         # Download the model from Hugging Face using huggingface_hub
+         model_path = hf_hub_download(repo_id=xgboostmodel_id, filename="xgboost_model.json")
+
+         # Load the model into XGBoost
+         xgboost_model = xgb.Booster()
+         xgboost_model.load_model(model_path)  # Load the model into the Booster object
+
+         return True
+     except Exception as e:
+         st.error(f"Error loading XGBoost model from Hugging Face: {e}")
+         return False
+
+ def display_predict_stress():
+     st.title("Predict Stress Level")
+     st.markdown("Answer the questions below to predict your stress level.")
+
+     # Sidebar for navigation
+     with st.sidebar:
+         go_home = st.button("Back to Home")
+         if go_home:
+             st.session_state.page = "home"
+             st.rerun()  # Go back to the homepage
+
+     load_xgboost_model()
+
+     # Define the form with dropdowns for user input
+     with st.form(key="stress_form"):
+         # Define the questions and their options
+         stress_questions = {
+             "How many fruits or vegetables do you eat every day?": ["0", "1", "2", "3", "4", "5"],
+             "How many new places do you visit in a year?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "How many people are very close to you?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "How many people do you help achieve a better life?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "How many people do you interact with during a typical day?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "How many remarkable achievements are you proud of?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "How many times do you donate your time or money to good causes?": ["0", "1", "2", "3", "4", "5"],
+             "How well do you complete your weekly to-do lists?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "In a typical day, how many hours do you experience 'FLOW'? (Flow is defined as the mental state in which you are fully immersed in performing an activity. You then experience a feeling of energized focus, full involvement, and enjoyment in the process of this activity)": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "How many steps (in thousands) do you typically walk every day?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "For how many years ahead is your life vision very clear?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "About how long do you typically sleep?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "How many days of vacation do you typically lose every year?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "How often do you shout or sulk at somebody?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "How sufficient is your income to cover basic life expenses (1 for insufficient, 2 for sufficient)?": ["1", "2"],
+             "How many recognitions have you received in your life?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "How many hours do you spend every day doing what you are passionate about?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "In a typical week, how many times do you have the opportunity to think about yourself?": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
+             "Age (1 = 'Less than 20', 2 = '21 to 35', 3 = '36 to 50', 4 = '51 or more')": ["1", "2", "3", "4"],
+             "Gender (1 = 'Female', 0 = 'Male')": ["0", "1"]
+         }
+
+         # Map the question strings to model feature names
+         question_to_feature_map = {
+             "How many fruits or vegetables do you eat every day?": "FRUITS_VEGGIES",
+             "How many new places do you visit in a year?": "PLACES_VISITED",
+             "How many people are very close to you?": "CORE_CIRCLE",
+             "How many people do you help achieve a better life?": "SUPPORTING_OTHERS",
+             "How many people do you interact with during a typical day?": "SOCIAL_NETWORK",
+             "How many remarkable achievements are you proud of?": "ACHIEVEMENT",
+             "How many times do you donate your time or money to good causes?": "DONATION",
+             "How well do you complete your weekly to-do lists?": "TODO_COMPLETED",
+             "In a typical day, how many hours do you experience 'FLOW'? (Flow is defined as the mental state in which you are fully immersed in performing an activity. You then experience a feeling of energized focus, full involvement, and enjoyment in the process of this activity)": "FLOW",
+             "How many steps (in thousands) do you typically walk every day?": "DAILY_STEPS",
+             "For how many years ahead is your life vision very clear?": "LIVE_VISION",
+             "About how long do you typically sleep?": "SLEEP_HOURS",
+             "How many days of vacation do you typically lose every year?": "LOST_VACATION",
+             "How often do you shout or sulk at somebody?": "DAILY_SHOUTING",
+             "How sufficient is your income to cover basic life expenses (1 for insufficient, 2 for sufficient)?": "SUFFICIENT_INCOME",
+             "How many recognitions have you received in your life?": "PERSONAL_AWARDS",
+             "How many hours do you spend every day doing what you are passionate about?": "TIME_FOR_PASSION",
+             "In a typical week, how many times do you have the opportunity to think about yourself?": "WEEKLY_MEDITATION",
+             "Age (1 = 'Less than 20', 2 = '21 to 35', 3 = '36 to 50', 4 = '51 or more')": "AGE",
+             "Gender (1 = 'Female', 0 = 'Male')": "GENDER"
+         }
+
+         # Map the string options "0"-"10" to the integers 0-10
+         response_map = {str(i): i for i in range(11)}
+         response_map.update({"1": 1, "2": 2})  # "1" and "2" (already covered) for the two-option questions
+
+         # Store user responses
+         responses = {}
+         for question, options in stress_questions.items():
+             responses[question] = st.selectbox(question, options)
+
+         # Submit button
+         submit_button = st.form_submit_button("Submit")
+
+     # When submit is clicked, process the responses and make a prediction
+     if submit_button:
+         # Convert responses to a feature dictionary keyed by the model's feature names
+         feature_dict = {question_to_feature_map[q]: response_map[responses[q]] for q in stress_questions.keys()}
+
+         # Convert to a pandas DataFrame
+         feature_df = pd.DataFrame([feature_dict])
+
+         # Make prediction
+         try:
+             dmatrix = xgb.DMatrix(feature_df)
+             prediction = xgboost_model.predict(dmatrix)
+             st.markdown(f"### Predicted Stress Level: {prediction[0]:.2f}")
+             st.markdown("Higher values indicate higher stress levels.")
+         except Exception as e:
+             st.error(f"Error making prediction: {e}")
requirements.txt ADDED
@@ -0,0 +1,20 @@
+ transformers
+ huggingface_hub
+ streamlit
+ langchain_core
+ langchain_community
+ langchain_huggingface
+ langchain_text_splitters
+ accelerate
+ watchdog
+ tqdm
+ pydub
+ bitsandbytes
+ git+https://github.com/openai/whisper.git
+ torch
+ torchaudio
+ torchvision
+ streamlit-mic-recorder
+ ffmpeg
+ openai-whisper
+ torchaudio
session_state.py ADDED
@@ -0,0 +1,22 @@
+ # session_state.py
+
+ import streamlit as st
+
+ def initialize_session_state():
+     if "avatars" not in st.session_state:
+         st.session_state.avatars = {'user': None, 'assistant': None}
+
+     if 'user_text' not in st.session_state:
+         st.session_state.user_text = None
+
+     if "max_response_length" not in st.session_state:
+         st.session_state.max_response_length = 1000
+
+     if "system_message" not in st.session_state:
+         st.session_state.system_message = "friendly AI conversing with a human user"
+
+     if "starter_message" not in st.session_state:
+         st.session_state.starter_message = "Hello, there! How can I help you today?"
+
+     if "chat_history" not in st.session_state:
+         st.session_state.chat_history = [{"role": "assistant", "content": st.session_state.starter_message}]