harris1 committed on
Commit 1c3f393 · verified · 1 Parent(s): 49465e1

Update app.py

Files changed (1)
  1. app.py +219 -140
app.py CHANGED
@@ -1,147 +1,226 @@
  import streamlit as st
- from openai_client import (
-     get_code_review_response,
-     refactor_code,
-     code_feedback,
-     suggest_best_practices,
-     remove_code_errors,
- )
-
 
- def main():
-     st.title("CodeMentor - (AI-Enhanced Code Collaboration Tool)")
-     st.subheader("Collaborate, Refactor, and Optimize with AI.")
-     st.write(
-         "A smart tool for distributed teams to automate code reviews, refactor efficiently, and get real-time AI-driven feedback."
-     )
-
-     # Instructions
-     st.write(
-         "Upload a file or paste your code below to get an AI-generated code review."
-     )
-
-     # Input Methods: File Upload or Text Area
-     uploaded_file = st.file_uploader(
-         "Upload a code file (Max 500 lines)", type=["py", "js", "txt"]
-     )
-     code_input = st.text_area("Or paste your code here (Max 1000 words)", height=300)
-
-     # Limit input size for code
-     if uploaded_file:
-         code = uploaded_file.read().decode("utf-8")
-         if len(code.splitlines()) > 500:
-             st.error(
-                 "File is too large! Please upload a file with a maximum of 500 lines."
-             )
-             code = None  # Reset code if it's too large
          else:
-             st.success(f"File uploaded: {uploaded_file.name}")
-     elif code_input:
-         code = code_input
-         if len(code.split()) > 1000:
-             st.error("Code exceeds 1000 words! Please shorten your code.")
-             code = None  # Reset code if it's too large
-     else:
-         code = None
-
-     # Button to trigger code review
-     if st.button("Get Code Review") and code:
-         with st.spinner("Processing..."):
-             # Call the OpenAI API to get code review
-             review = get_code_review_response(code)
-             st.subheader("Code Review Results:")
-             st.write(review)
-
-             # Provide download option
-             st.download_button(
-                 label="Download Code Review",
-                 data=review,
-                 file_name="code_review.txt",
-                 mime="text/plain",
-             )
-             st.success("You can download the code review as code_review.txt")
-
-     # Button to trigger code refactoring
-     if st.button("Refactor Code") and code:
-         with st.spinner("Refactoring your code..."):
-             refactored_code = refactor_code(code)
-             st.subheader("Refactored Code:")
-             st.write(refactored_code)
-
-             # Provide download option for refactored code
-             st.download_button(
-                 label="Download Refactored Code",
-                 data=refactored_code,
-                 file_name="refactored_code.txt",
-                 mime="text/plain",
-             )
-             st.success("You can download the refactored code as refactored_code.txt")
-
-     # Button to trigger code feedback
-     if st.button("Get Code Feedback") and code:
-         with st.spinner("Getting feedback on your code..."):
-             feedback = code_feedback(code)
-             st.subheader("Code Feedback:")
-             st.write(feedback)
-
-             # Ensure feedback is a string for download
-             feedback_text = feedback if isinstance(feedback, str) else str(feedback)
-
-             # Provide download option for code feedback
-             st.download_button(
-                 label="Download Code Feedback",
-                 data=feedback_text,  # Use the extracted string here
-                 file_name="code_feedback.txt",
-                 mime="text/plain",
-             )
-             st.success("You can download the code feedback as code_feedback.txt")
-
-     # Add button to suggest best practices
-     if st.button("Suggest Best Practices") and code:
-         with st.spinner("Getting best practices..."):
-             best_practices = suggest_best_practices(code)
-             st.subheader("Best Practices Suggestions:")
-             st.write(best_practices)
-
-             # Provide download option for best practices suggestions
-             best_practices_text = (
-                 best_practices
-                 if isinstance(best_practices, str)
-                 else str(best_practices)
-             )
-             st.download_button(
-                 label="Download Best Practices Suggestions",
-                 data=best_practices_text,
-                 file_name="best_practices.txt",
-                 mime="text/plain",
-             )
-             st.success(
-                 "You can download the best practices suggestions as best_practices.txt"
-             )
-
-     # Button to trigger error removal
-     if st.button("Remove Code Errors") and code:
-         with st.spinner("Removing errors from your code..."):
-             error_removal_suggestions = remove_code_errors(code)
-             st.subheader("Error Removal Suggestions:")
-             st.write(error_removal_suggestions)
-
-             # Provide download option for error removal suggestions
-             error_removal_text = (
-                 error_removal_suggestions
-                 if isinstance(error_removal_suggestions, str)
-                 else str(error_removal_suggestions)
-             )
-             st.download_button(
-                 label="Download Error Removal Suggestions",
-                 data=error_removal_text,
-                 file_name="error_removal_suggestions.txt",
-                 mime="text/plain",
-             )
-             st.success(
-                 "You can download the error removal suggestions as error_removal_suggestions.txt"
-             )
 
 
  if __name__ == "__main__":
      main()
+ from dotenv import load_dotenv
+ import os
+ from docx import Document
+ from llama_index.llms.together import TogetherLLM
+ from llama_index.core.llms import ChatMessage, MessageRole
+ from Bio import Entrez
+ import ssl
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
  import streamlit as st
+ from googleapiclient.discovery import build
+ from typing import List, Optional
+
+ load_dotenv()
+ # 995d5f1a8de125c5b39bb48c2613e85f57d53c0e498a87d1ff33f0ec89a26ec7
+ os.environ["TOGETHER_API"] = os.getenv("TOGETHER_API")
+ os.environ["GOOGLE_SEARCH_API_KEY"] = os.getenv("GOOGLE_SEARCH_API_KEY")
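+ # Note: os.environ values must be strings, so the two assignments above raise TypeError if either variable is missing from the environment / .env file.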
+
+ def search_pubmed(query: str) -> Optional[List[str]]:
+     """
+     Searches PubMed for a given query and returns a list of formatted results
+     (or None if no results are found).
+     """
+     Entrez.email = "[email protected]"  # Replace with your email
+
+     try:
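+         # Workaround: the next line disables TLS certificate verification for HTTPS requests made through Python's default SSL context, not just the Entrez calls.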
+         ssl._create_default_https_context = ssl._create_unverified_context
+
+         handle = Entrez.esearch(db="pubmed", term=query, retmax=3)
+         record = Entrez.read(handle)
+         id_list = record["IdList"]
+
+         if not id_list:
+             return None
+
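+         # Fetch the full records (title, abstract, PMID) for the matching IDs as XML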
+         handle = Entrez.efetch(db="pubmed", id=id_list, retmode="xml")
+         articles = Entrez.read(handle)
+
+         results = []
+         for article in articles['PubmedArticle']:
+             try:
+                 medline_citation = article['MedlineCitation']
+                 article_data = medline_citation['Article']
+                 title = article_data['ArticleTitle']
+                 abstract = article_data.get('Abstract', {}).get('AbstractText', [""])[0]
+
+                 result = f"**Title:** {title}\n**Abstract:** {abstract}\n"
+                 result += f"**Link:** https://pubmed.ncbi.nlm.nih.gov/{medline_citation['PMID']} \n\n"
+                 results.append(result)
+             except KeyError as e:
+                 print(f"Error parsing article: {article}, Error: {e}")
+
+         return results
+
+     except Exception as e:
+         print(f"Error accessing PubMed: {e}")
+         return None
+
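+ # Note: chat_with_pubmed below provides per-article Q&A but is not called anywhere else in this file.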
+ def chat_with_pubmed(article_text, article_link):
+     """
+     Engages in a chat-like interaction with a PubMed article using TogetherLLM.
+     """
+     try:
+         llm = TogetherLLM(model="QWEN/QWEN1.5-14B-CHAT", api_key=os.environ['TOGETHER_API'])
+         messages = [
+             ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful AI assistant summarizing and answering questions about the following medical research article: " + article_link),
+             ChatMessage(role=MessageRole.USER, content=article_text)
+         ]
+         response = llm.chat(messages)
+         return str(response) if response else "I'm sorry, I couldn't generate a summary for this article."
+     except Exception as e:
+         print(f"Error in chat_with_pubmed: {e}")
+         return "An error occurred while generating a summary."
+
+ def search_web(query: str, num_results: int = 3) -> Optional[List[str]]:
+     """
+     Searches the web using the Google Search API and returns a list of formatted results
+     (or None if no results are found).
+     """
+     try:
+         service = build("customsearch", "v1", developerKey=os.environ["GOOGLE_SEARCH_API_KEY"])
+
+         # Execute the search request
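+         # "cx" is the Programmable Search Engine ID that scopes which sites the query searches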
+         res = service.cse().list(q=query, cx="e31a5857f45ef4d2a", num=num_results).execute()
+
+         if "items" not in res:
+             return None
+
+         results = []
+         for item in res["items"]:
+             title = item["title"]
+             link = item["link"]
+             snippet = item["snippet"]
+             result = f"**Title:** {title}\n**Link:** {link} \n**Snippet:** {snippet}\n\n"
+             results.append(result)
+
+         return results
+
+     except Exception as e:
+         print(f"Error performing web search: {e}")
+         return None
+
+
+ from together import Together
+ def medmind_chatbot(user_input, chat_history=None):
+     """
+     Processes user input, interacts with various resources, and generates a response.
+     Handles potential errors and maintains chat history.
+     """
+     if chat_history is None:
+         chat_history = []
+
+     response_parts = []  # Collect responses from different sources
+     final_response = ""
+
+     try:
+         # PubMed Search and Chat
+         pubmed_results = search_pubmed(user_input)
+         if pubmed_results:
+             for article_text in pubmed_results:
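+                 # Relies on search_pubmed formatting each result as three lines: title, abstract, link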
+                 title, abstract, link = article_text.split("\n")[:3]
+                 # print(article_text)
+                 response_parts.append(f"{title}\n{abstract}\n{link}\n")
+         else:
+             response_parts.append("No relevant PubMed articles found.")
 
+         # Web Search
+         web_results = search_web(user_input)
+         if web_results:
+             response_parts.append("\n\n**Web Search Results:**")
+             response_parts.extend(web_results)
          else:
+             response_parts.append("No relevant web search results found.")
+
+         # Combine response parts into a single string
+         response_text = "\n\n".join(response_parts)
+
+         prompt = f"""You are a Health Assistant AI designed to provide detailed responses to health-related questions.
+         Based on the information retrieved from the PubMed and Web Search below, answer the user's query appropriately.
+
+         - If the user's query is health-related, provide a detailed and helpful response based on the retrieved information, taking any previous conversation into account as well.
+         - If the query is a general greeting (e.g., 'Hello', 'Hi'), respond as a friendly assistant.
+         - If the query is irrelevant or unrelated to health, respond with: 'I am a health assistant. Please ask only health-related questions.'
+         - Don't mention in your response where the information was retrieved from.
+         Previous Conversation:
+         {chat_history}
+
+         User's Query: {user_input}
+
+         Information retrieved from PubMed and Web Search:
+         {response_text}
+
+         Your response:"""
+
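+         # Send the assembled prompt (instructions, prior chat history, and retrieved context) to Together's chat completions endpoint as a single user message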
+         client = Together(api_key=os.environ.get('TOGETHER_API'))
 
+         response = client.chat.completions.create(
+             model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+             messages=[{"role": "user", "content": prompt}],
+         )
+
+         final_response = response.choices[0].message.content
+
+     except Exception as e:
+         print(f"Error in chatbot: {e}")
+         final_response = "An error occurred. Please try again later."
+
+     chat_history.append((user_input, final_response))
+     return final_response, chat_history
+
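+ # Note: the following module-level call runs whenever the file is imported and makes live API requests.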
+ medmind_chatbot("What are the symptoms of COVID-19?")
+
+ import gradio as gr
+
+ def show_info_popup():
+     info = """
+     **HealthHive is an AI-powered chatbot designed to assist with medical information.**
+     ...
+     """
+     return info
+
+
+ def main():
+     # Initialize Gradio Interface
+     with gr.Blocks() as demo:
+         gr.Markdown("# HealthHive Chatbot")
+         gr.Markdown("Ask your medical questions and get reliable information!")
+
+         # Example Questions (Sidebar)
+         gr.Markdown("### Example Questions")
+         example_questions = [
+             "What are the symptoms of COVID-19?",
+             "How can I manage my diabetes?",
+             "What are the potential side effects of ibuprofen?",
+             "What lifestyle changes can help prevent heart disease?"
+         ]
+         for question in example_questions:
+             gr.Markdown(f"- {question}")
+
+         # Chat History and User Input
+         with gr.Row():
+             user_input = gr.Textbox(label="You:", placeholder="Type your medical question here...", lines=2)
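+             # gr.State holds the (user, assistant) turn history for this session across submissions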
+             chat_history = gr.State([])
+
+         # Output Container
+         with gr.Row():
+             response = gr.Textbox(label="HealthHive:", placeholder="Response will appear here...", interactive=False, lines=10)
+         def clear_chat():
+             return "", ""
+
+         # Define function to update chat history and response
+         def on_submit(user_input, chat_history):
+             result, updated_history = medmind_chatbot(user_input, chat_history)
+             return result, updated_history
+
+         # Link the submit button to the chatbot function
+         gr.Button("Submit").click(on_submit, inputs=[user_input, chat_history], outputs=[response, chat_history])
+         # gr.Button("Start New Chat").click(lambda: [], outputs=[chat_history])
+         gr.Button("Start New Chat").click(clear_chat, outputs=[user_input, response])
+
+
+     demo.launch()
 
  if __name__ == "__main__":
      main()