benticha committed on
Commit
df64ccd
·
verified ·
1 Parent(s): 05e6dad

Added user profile

Browse files
Files changed (1) hide show
  1. app.py +164 -129
app.py CHANGED
@@ -7,143 +7,178 @@ from langchain_core.tracers.context import collect_runs
7
  from qdrant_client import QdrantClient
8
  from dotenv import load_dotenv
9
  import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
- load_dotenv()
12
- client = Client()
13
- qdrant_api=os.getenv("QDRANT_API_KEY")
14
- qdrant_url=os.getenv("QDRANT_URL")
15
- qdrant_client = QdrantClient(qdrant_url ,api_key=qdrant_api)
16
- st.set_page_config(page_title = "SUP'ASSISTANT")
17
- st.subheader("Hey there! How can I help you today!")
18
-
19
- memory = lc_memory.ConversationBufferMemory(
20
- chat_memory=lc_memory.StreamlitChatMessageHistory(key="langchain_messages"),
21
- return_messages=True,
22
- memory_key="chat_history",
23
- )
24
- st.sidebar.markdown("## Feedback Scale")
25
- feedback_option = (
26
- "thumbs" if st.sidebar.toggle(label="`Faces` ⇄ `Thumbs`", value=False) else "faces"
27
- )
28
- with st.sidebar:
29
- model_name = st.selectbox("**Model**", options=["llama-3.1-70b-versatile","gemma2-9b-it","gemma-7b-it","llama-3.2-3b-preview", "llama3-70b-8192", "mixtral-8x7b-32768"])
30
- temp = st.slider("**Temperature**", min_value=0.0, max_value=1.0, step=0.001)
31
- n_docs = st.number_input("**Number of retireved documents**", min_value=0, max_value=10, value=5, step=1)
32
- if st.sidebar.button("Clear message history"):
33
- print("Clearing message history")
34
- memory.clear()
35
-
36
- retriever = retriever(n_docs=n_docs)
37
- # Create Chain
38
- chain = get_expression_chain(retriever,model_name,temp)
39
-
40
- for msg in st.session_state.langchain_messages:
41
- avatar = "🦜" if msg.type == "ai" else None
42
- with st.chat_message(msg.type, avatar=avatar):
43
- st.markdown(msg.content)
44
-
45
-
46
- prompt = st.chat_input(placeholder="What do you need to know about SUP'COM ?")
47
-
48
- if prompt :
49
- with st.chat_message("user"):
50
- st.write(prompt)
51
-
52
- with st.chat_message("assistant", avatar="🦜"):
53
- message_placeholder = st.empty()
54
- full_response = ""
55
- # Define the basic input structure for the chains
56
- input_dict = {"input": prompt.lower()}
57
-
58
-
59
- with collect_runs() as cb:
60
- for chunk in chain.stream(input_dict, config={"tags": ["SUP'ASSISTANT"]}):
61
- full_response += chunk.content
62
- message_placeholder.markdown(full_response + "β–Œ")
63
- memory.save_context(input_dict, {"output": full_response})
64
- st.session_state.run_id = cb.traced_runs[0].id
65
- message_placeholder.markdown(full_response)
66
-
67
- with st.spinner("Just a sec! Dont enter prompts while loading pelase!"):
68
- run_id = st.session_state.run_id
69
- question_embedding = get_embeddings(prompt)
70
- answer_embedding = get_embeddings(full_response)
71
- # Add question and answer to Qdrant
72
- qdrant_client.upload_collection(
73
- collection_name="chat-history",
74
- payload=[
75
- {"text": prompt, "type": "question", "question_ID": run_id},
76
- {"text": full_response, "type": "answer", "question_ID": run_id}
77
- ],
78
- vectors=[
79
- question_embedding,
80
- answer_embedding,
81
- ],
82
- parallel=4,
83
- max_retries=3,
84
- )
85
 
86
-
 
 
 
87
 
88
- if st.session_state.get("run_id"):
89
- run_id = st.session_state.run_id
90
- feedback = streamlit_feedback(
91
- feedback_type=feedback_option,
92
- optional_text_label="[Optional] Please provide an explanation",
93
- key=f"feedback_{run_id}",
94
- )
95
 
96
- # Define score mappings for both "thumbs" and "faces" feedback systems
97
- score_mappings = {
98
- "thumbs": {"πŸ‘": 1, "πŸ‘Ž": 0},
99
- "faces": {"πŸ˜€": 1, "πŸ™‚": 0.75, "😐": 0.5, "πŸ™": 0.25, "😞": 0},
100
- }
101
 
102
- # Get the score mapping based on the selected feedback option
103
- scores = score_mappings[feedback_option]
 
 
 
 
 
 
 
104
 
105
- if feedback:
106
- # Get the score from the selected feedback option's score mapping
107
- score = scores.get(feedback["score"])
108
 
109
- if score is not None:
110
- # Formulate feedback type string incorporating the feedback option
111
- # and score value
112
- feedback_type_str = f"{feedback_option} {feedback['score']}"
 
 
 
113
 
114
- # Record the feedback with the formulated feedback type string
115
- # and optional comment
116
  with st.spinner("Just a sec! Dont enter prompts while loading pelase!"):
117
- feedback_record = client.create_feedback(
118
- run_id,
119
- feedback_type_str,
120
- score=score,
121
- comment=feedback.get("text"),
122
- )
123
- st.session_state.feedback = {
124
- "feedback_id": str(feedback_record.id),
125
- "score": score,
126
- }
127
- else:
128
- st.warning("Invalid feedback score.")
129
-
130
- with st.spinner("Just a sec! Dont enter prompts while loading pelase!"):
131
- if feedback.get("text"):
132
- comment = feedback.get("text")
133
- feedback_embedding = get_embeddings(comment)
134
- else:
135
- comment = "no comment"
136
- feedback_embedding = get_embeddings(comment)
137
 
138
 
139
- qdrant_client.upload_collection(
140
- collection_name="chat-history",
141
- payload=[
142
- {"text": comment,"Score:":score, "type": "feedback", "question_ID": run_id}
143
- ],
144
- vectors=[
145
- feedback_embedding
146
- ],
147
- parallel=4,
148
- max_retries=3,
149
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  from qdrant_client import QdrantClient
8
  from dotenv import load_dotenv
9
  import os
10
+ if "access_granted" not in st.session_state:
11
+ st.session_state.access_granted = False
12
+ if "profile" not in st.session_state:
13
+ st.session_state.profile = None
14
+ if "name" not in st.session_state:
15
+ st.session_state.name = None
16
+ if not st.session_state.access_granted:
17
+ # Profile input section
18
+ st.title("User Profile")
19
+ name = st.text_input("Name")
20
+ profile_selector = st.selectbox("Profile", options=["Student", "Professor", "Administrator", "Other"])
21
+
22
+ if profile_selector == "Other":
23
+ profile = st.text_input("What is your role?")
24
+ else:
25
+ profile = profile_selector
26
+
27
+ if profile and name:
28
+ d = False
29
+ else:
30
+ d = True
31
+
32
+ submission = st.button("Submit", disabled=d)
33
+
34
+ if submission:
35
+ st.session_state.profile = profile
36
+ st.session_state.name = name
37
+ st.session_state.access_granted = True # Grant access to main app
38
+ st.rerun() # Reload the app
39
+ else:
40
+ load_dotenv()
41
+ profile = st.session_state.profile
42
+ client = Client()
43
+ qdrant_api=os.getenv("QDRANT_API_KEY")
44
+ qdrant_url=os.getenv("QDRANT_URL")
45
+ qdrant_client = QdrantClient(qdrant_url ,api_key=qdrant_api)
46
+ st.set_page_config(page_title = "SUP'ASSISTANT")
47
+ st.subheader(f"Hello {st.session_state.name}! How can I help you today!")
48
+
49
+ memory = lc_memory.ConversationBufferMemory(
50
+ chat_memory=lc_memory.StreamlitChatMessageHistory(key="langchain_messages"),
51
+ return_messages=True,
52
+ memory_key="chat_history",
53
+ )
54
+ st.sidebar.markdown("## Feedback Scale")
55
+ feedback_option = (
56
+ "thumbs" if st.sidebar.toggle(label="`Faces` ⇄ `Thumbs`", value=False) else "faces"
57
+ )
58
+ with st.sidebar:
59
+ model_name = st.selectbox("**Model**", options=["llama-3.1-70b-versatile","gemma2-9b-it","gemma-7b-it","llama-3.2-3b-preview", "llama3-70b-8192", "mixtral-8x7b-32768"])
60
+ temp = st.slider("**Temperature**", min_value=0.0, max_value=1.0, step=0.001)
61
+ n_docs = st.number_input("**Number of retireved documents**", min_value=0, max_value=10, value=5, step=1)
62
+ if st.sidebar.button("Clear message history"):
63
+ print("Clearing message history")
64
+ memory.clear()
65
 
66
+ retriever = retriever(n_docs=n_docs)
67
+ # Create Chain
68
+ chain = get_expression_chain(retriever,model_name,temp)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
+ for msg in st.session_state.langchain_messages:
71
+ avatar = "🦜" if msg.type == "ai" else None
72
+ with st.chat_message(msg.type, avatar=avatar):
73
+ st.markdown(msg.content)
74
 
 
 
 
 
 
 
 
75
 
76
+ prompt = st.chat_input(placeholder="What do you need to know about SUP'COM ?")
 
 
 
 
77
 
78
+ if prompt :
79
+ with st.chat_message("user"):
80
+ st.write(prompt)
81
+
82
+ with st.chat_message("assistant", avatar="🦜"):
83
+ message_placeholder = st.empty()
84
+ full_response = ""
85
+ # Define the basic input structure for the chains
86
+ input_dict = {"input": prompt.lower()}
87
 
 
 
 
88
 
89
+ with collect_runs() as cb:
90
+ for chunk in chain.stream(input_dict, config={"tags": ["SUP'ASSISTANT"]}):
91
+ full_response += chunk.content
92
+ message_placeholder.markdown(full_response + "β–Œ")
93
+ memory.save_context(input_dict, {"output": full_response})
94
+ st.session_state.run_id = cb.traced_runs[0].id
95
+ message_placeholder.markdown(full_response)
96
 
 
 
97
  with st.spinner("Just a sec! Dont enter prompts while loading pelase!"):
98
+ run_id = st.session_state.run_id
99
+ question_embedding = get_embeddings(prompt)
100
+ answer_embedding = get_embeddings(full_response)
101
+ # Add question and answer to Qdrant
102
+ qdrant_client.upload_collection(
103
+ collection_name="chat-history",
104
+ payload=[
105
+ {"text": prompt, "type": "question", "question_ID": run_id},
106
+ {"text": full_response, "type": "answer", "question_ID": run_id}
107
+ ],
108
+ vectors=[
109
+ question_embedding,
110
+ answer_embedding,
111
+ ],
112
+ parallel=4,
113
+ max_retries=3,
114
+ )
 
 
 
115
 
116
 
117
+
118
+ if st.session_state.get("run_id"):
119
+ run_id = st.session_state.run_id
120
+ feedback = streamlit_feedback(
121
+ feedback_type=feedback_option,
122
+ optional_text_label="[Optional] Please provide an explanation",
123
+ key=f"feedback_{run_id}",
124
+ )
125
+
126
+ # Define score mappings for both "thumbs" and "faces" feedback systems
127
+ score_mappings = {
128
+ "thumbs": {"πŸ‘": 1, "πŸ‘Ž": 0},
129
+ "faces": {"πŸ˜€": 1, "πŸ™‚": 0.75, "😐": 0.5, "πŸ™": 0.25, "😞": 0},
130
+ }
131
+
132
+ # Get the score mapping based on the selected feedback option
133
+ scores = score_mappings[feedback_option]
134
+
135
+ if feedback:
136
+ # Get the score from the selected feedback option's score mapping
137
+ score = scores.get(feedback["score"])
138
+
139
+ if score is not None:
140
+ # Formulate feedback type string incorporating the feedback option
141
+ # and score value
142
+ feedback_type_str = f"{feedback_option} {feedback['score']}"
143
+
144
+ # Record the feedback with the formulated feedback type string
145
+ # and optional comment
146
+ with st.spinner("Just a sec! Dont enter prompts while loading pelase!"):
147
+ feedback_record = client.create_feedback(
148
+ run_id,
149
+ feedback_type_str,
150
+ score=score,
151
+ comment=feedback.get("text"),
152
+ source_info={"profile":profile}
153
+ )
154
+ st.session_state.feedback = {
155
+ "feedback_id": str(feedback_record.id),
156
+ "score": score,
157
+ }
158
+ else:
159
+ st.warning("Invalid feedback score.")
160
+
161
+ with st.spinner("Just a sec! Dont enter prompts while loading pelase!"):
162
+ if feedback.get("text"):
163
+ comment = feedback.get("text")
164
+ feedback_embedding = get_embeddings(comment)
165
+ else:
166
+ comment = "no comment"
167
+ feedback_embedding = get_embeddings(comment)
168
+
169
+
170
+ qdrant_client.upload_collection(
171
+ collection_name="chat-history",
172
+ payload=[
173
+ {"text": comment,
174
+ "Score:":score,
175
+ "type": "feedback",
176
+ "question_ID": run_id,
177
+ "User_profile":profile}
178
+ ],
179
+ vectors=[
180
+ feedback_embedding
181
+ ],
182
+ parallel=4,
183
+ max_retries=3,
184
+ )