Muhammadbilal10101 committed on
Commit 644fb9b · verified
1 Parent(s): a9f9aa3

Update app.py

Files changed (1)
  1. app.py +154 -154
app.py CHANGED
@@ -1,155 +1,155 @@
- import streamlit as st
- import time
- from autism_chatbot import *
-
- class StreamHandler:
-     def __init__(self, placeholder):
-         self.text = ""
-         self.text_container = placeholder
-
-     def append_text(self, text: str) -> None:
-         self.text += text
-         self.text_container.markdown(self.text)
-
- class StreamingGroqLLM(GroqLLM):
-     stream_handler: Any = Field(None, description="Stream handler for real-time output")
-
-     def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
-         completion = self.client.chat.completions.create(
-             messages=[{"role": "user", "content": prompt}],
-             model=self.model_name,
-             stream=True,
-             **kwargs
-         )
-
-         collected_chunks = []
-         collected_messages = []
-
-         for chunk in completion:
-             chunk_message = chunk.choices[0].delta.content
-             if chunk_message is not None:
-                 collected_chunks.append(chunk_message)
-                 collected_messages.append(chunk_message)
-                 if self.stream_handler:
-                     self.stream_handler.append_text(chunk_message)
-                     time.sleep(0.05)
-
-         return ''.join(collected_messages)
-
- class StreamingAutismResearchBot(AutismResearchBot):
-     def __init__(self, groq_api_key: str, stream_handler: StreamHandler, index_path: str = "faiss_index"):
-         self.llm = StreamingGroqLLM(
-             groq_api_key=groq_api_key,
-             model_name="llama-3.3-70b-versatile",
-             stream_handler=stream_handler
-         )
-
-         self.embeddings = HuggingFaceEmbeddings(
-             model_name="./local_model",
-             model_kwargs={'device': 'cpu'}
-         )
-         self.db = FAISS.load_local(index_path, self.embeddings, allow_dangerous_deserialization=True)
-
-         self.memory = ConversationBufferMemory(
-             memory_key="chat_history",
-             return_messages=True,
-             output_key="answer"
-         )
-
-         self.qa_chain = self._create_qa_chain()
-
- def main():
-     # Page configuration
-     st.set_page_config(
-         page_title="Autism Research Assistant",
-         page_icon="🧩",
-         layout="wide"
-     )
-
-     # Add custom CSS
-     st.markdown("""
-         <style>
-         .stApp {
-             max-width: 1200px;
-             margin: 0 auto;
-         }
-         .stMarkdown {
-             font-size: 16px;
-         }
-         .chat-message {
-             padding: 1rem;
-             border-radius: 0.5rem;
-             margin-bottom: 1rem;
-         }
-         .timestamp {
-             font-size: 0.8em;
-             color: #666;
-         }
-         </style>
-     """, unsafe_allow_html=True)
-
-     # Header
-     st.title("🧩 Autism Research Assistant")
-     st.markdown("""
-     Welcome to your AI-powered autism research assistant. I'm here to provide evidence-based
-     assessments and therapy recommendations based on scientific research.
-     """)
-
-     # Initialize session state
-     if 'messages' not in st.session_state:
-         st.session_state.messages = [
-             {"role": "assistant", "content": "Hello! I'm your autism research assistant. How can I help you today?"}
-         ]
-
-     # Initialize bot
-     if 'bot' not in st.session_state:
-         st.session_state.stream_container = None
-         st.session_state.bot = None
-
-     # Display chat messages
-     for message in st.session_state.messages:
-         with st.chat_message(message["role"]):
-             st.write(f"{message['content']}")
-             st.caption(f"{time.strftime('%I:%M %p')}")
-
-     # Chat input
-     if prompt := st.chat_input("Type your message here..."):
-         # Display user message
-         with st.chat_message("user"):
-             st.write(prompt)
-             st.caption(f"{time.strftime('%I:%M %p')}")
-
-         # Add to session state
-         st.session_state.messages.append({"role": "user", "content": prompt})
-
-         # Create a new chat message container for the assistant's response
-         assistant_message = st.chat_message("assistant")
-         with assistant_message:
-             # Create a placeholder for the streaming text
-             stream_placeholder = st.empty()
-
-             # Initialize the bot with the new stream handler if not already initialized
-             if st.session_state.bot is None:
-                 stream_handler = StreamHandler(stream_placeholder)
-                 st.session_state.bot = StreamingAutismResearchBot(
-                     groq_api_key="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",
-                     stream_handler=stream_handler,
-                 )
-             else:
-                 # Update the stream handler with the new placeholder
-                 st.session_state.bot.llm.stream_handler.text = ""
-                 st.session_state.bot.llm.stream_handler.text_container = stream_placeholder
-
-             # Generate response
-             response = st.session_state.bot.answer_question(prompt)
-
-             # Clear the streaming placeholder and display the final message
-             stream_placeholder.empty()
-             st.write(response['answer'])
-             st.caption(f"{time.strftime('%I:%M %p')}")
-
-         # Add bot response to session state
-         st.session_state.messages.append({"role": "assistant", "content": response['answer']})
-
- if __name__ == "__main__":
      main()
 
+ import streamlit as st
+ import time
+ from autism_chatbot import *
+
+ class StreamHandler:
+     def __init__(self, placeholder):
+         self.text = ""
+         self.text_container = placeholder
+
+     def append_text(self, text: str) -> None:
+         self.text += text
+         self.text_container.markdown(self.text)
+
+ class StreamingGroqLLM(GroqLLM):
+     stream_handler: Any = Field(None, description="Stream handler for real-time output")
+
+     def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
+         completion = self.client.chat.completions.create(
+             messages=[{"role": "user", "content": prompt}],
+             model=self.model_name,
+             stream=True,
+             **kwargs
+         )
+
+         collected_chunks = []
+         collected_messages = []
+
+         for chunk in completion:
+             chunk_message = chunk.choices[0].delta.content
+             if chunk_message is not None:
+                 collected_chunks.append(chunk_message)
+                 collected_messages.append(chunk_message)
+                 if self.stream_handler:
+                     self.stream_handler.append_text(chunk_message)
+                     time.sleep(0.05)
+
+         return ''.join(collected_messages)
+
+ class StreamingAutismResearchBot(AutismResearchBot):
+     def __init__(self, groq_api_key: str, stream_handler: StreamHandler, index_path: str = "faiss_index"):
+         self.llm = StreamingGroqLLM(
+             groq_api_key=groq_api_key,
+             model_name="llama-3.3-70b-versatile",
+             stream_handler=stream_handler
+         )
+
+         self.embeddings = HuggingFaceEmbeddings(
+             model_name="pritamdeka/S-PubMedBert-MS-MARCO",
+             model_kwargs={'device': 'cpu'}
+         )
+         self.db = FAISS.load_local(index_path, self.embeddings, allow_dangerous_deserialization=True)
+
+         self.memory = ConversationBufferMemory(
+             memory_key="chat_history",
+             return_messages=True,
+             output_key="answer"
+         )
+
+         self.qa_chain = self._create_qa_chain()
+
+ def main():
+     # Page configuration
+     st.set_page_config(
+         page_title="Autism Research Assistant",
+         page_icon="🧩",
+         layout="wide"
+     )
+
+     # Add custom CSS
+     st.markdown("""
+         <style>
+         .stApp {
+             max-width: 1200px;
+             margin: 0 auto;
+         }
+         .stMarkdown {
+             font-size: 16px;
+         }
+         .chat-message {
+             padding: 1rem;
+             border-radius: 0.5rem;
+             margin-bottom: 1rem;
+         }
+         .timestamp {
+             font-size: 0.8em;
+             color: #666;
+         }
+         </style>
+     """, unsafe_allow_html=True)
+
+     # Header
+     st.title("🧩 Autism Research Assistant")
+     st.markdown("""
+     Welcome to your AI-powered autism research assistant. I'm here to provide evidence-based
+     assessments and therapy recommendations based on scientific research.
+     """)
+
+     # Initialize session state
+     if 'messages' not in st.session_state:
+         st.session_state.messages = [
+             {"role": "assistant", "content": "Hello! I'm your autism research assistant. How can I help you today?"}
+         ]
+
+     # Initialize bot
+     if 'bot' not in st.session_state:
+         st.session_state.stream_container = None
+         st.session_state.bot = None
+
+     # Display chat messages
+     for message in st.session_state.messages:
+         with st.chat_message(message["role"]):
+             st.write(f"{message['content']}")
+             st.caption(f"{time.strftime('%I:%M %p')}")
+
+     # Chat input
+     if prompt := st.chat_input("Type your message here..."):
+         # Display user message
+         with st.chat_message("user"):
+             st.write(prompt)
+             st.caption(f"{time.strftime('%I:%M %p')}")
+
+         # Add to session state
+         st.session_state.messages.append({"role": "user", "content": prompt})
+
+         # Create a new chat message container for the assistant's response
+         assistant_message = st.chat_message("assistant")
+         with assistant_message:
+             # Create a placeholder for the streaming text
+             stream_placeholder = st.empty()
+
+             # Initialize the bot with the new stream handler if not already initialized
+             if st.session_state.bot is None:
+                 stream_handler = StreamHandler(stream_placeholder)
+                 st.session_state.bot = StreamingAutismResearchBot(
+                     groq_api_key="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",
+                     stream_handler=stream_handler,
+                 )
+             else:
+                 # Update the stream handler with the new placeholder
+                 st.session_state.bot.llm.stream_handler.text = ""
+                 st.session_state.bot.llm.stream_handler.text_container = stream_placeholder
+
+             # Generate response
+             response = st.session_state.bot.answer_question(prompt)
+
+             # Clear the streaming placeholder and display the final message
+             stream_placeholder.empty()
+             st.write(response['answer'])
+             st.caption(f"{time.strftime('%I:%M %p')}")
+
+         # Add bot response to session state
+         st.session_state.messages.append({"role": "assistant", "content": response['answer']})
+
+ if __name__ == "__main__":
      main()